fault package funcs rename (#2583)
## Description

Renames the functions in the fault package to make their purpose and behavior clearer. Largely just find-and-replace changes, except for fault.go and the fault examples.

## Does this PR need a docs update or release note?

- [x] ⛔ No

## Type of change

- [x] 🧹 Tech Debt/Cleanup

## Issue(s)

* #1970

## Test Plan

- [x] ⚡ Unit test
- [x] 💚 E2E
This commit is contained in:
parent 422a05e21d
commit 9e783efe3a
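The diff below applies a mechanical mapping: the `*fault.Errors` type becomes `*fault.Bus`, `Err()` becomes `Failure()`, `Errs()` becomes `Recovered()`, `Tracker()` becomes `Local()` (conventionally bound to `el` instead of `et`), and `Add()` becomes `AddRecoverable()`. A minimal sketch of how a caller reads under the new names, assuming the package semantics shown in this diff; the loop body and the import path are illustrative, not taken from the repository:

```go
package main

import (
	"errors"
	"fmt"

	// assumed import path for the fault package this PR renames
	"github.com/alcionai/corso/src/pkg/fault"
)

func main() {
	// fault.New(failFast) constructs the error bus (was: fault.Errors).
	errs := fault.New(false)

	// Local() returns a loop-scoped view of the bus (was: errs.Tracker()).
	el := errs.Local()

	for i := 0; i < 3; i++ {
		// Failure() reports the non-recoverable error, if any (was: Err()).
		if el.Failure() != nil {
			break
		}

		if i == 1 {
			// AddRecoverable() records a non-fatal error (was: Add()).
			el.AddRecoverable(errors.New("example recoverable error"))
		}
	}

	fmt.Println(errs.Failure())   // hard failure; nil in this sketch (was: Err())
	fmt.Println(errs.Recovered()) // recoverable errors (was: Errs())
}
```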
@@ -314,8 +314,8 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error {
 
 	bups, ferrs := r.Backups(ctx, bIDs)
 	// TODO: print/log recoverable errors
-	if ferrs.Err() != nil {
-		return Only(ctx, errors.Wrap(ferrs.Err(), "Unable to retrieve backup results from storage"))
+	if ferrs.Failure() != nil {
+		return Only(ctx, errors.Wrap(ferrs.Failure(), "Unable to retrieve backup results from storage"))
 	}
 
 	backup.PrintAll(ctx, bups)
@@ -492,7 +492,7 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error {
 
 // runDetailsExchangeCmd actually performs the lookup in backup details.
 // the fault.Errors return is always non-nil. Callers should check if
-// errs.Err() == nil.
+// errs.Failure() == nil.
 func runDetailsExchangeCmd(
 	ctx context.Context,
 	r repository.BackupGetter,
@@ -505,12 +505,12 @@ func runDetailsExchangeCmd(
 
 	d, _, errs := r.BackupDetails(ctx, backupID)
 	// TODO: log/track recoverable errors
-	if errs.Err() != nil {
-		if errors.Is(errs.Err(), data.ErrNotFound) {
+	if errs.Failure() != nil {
+		if errors.Is(errs.Failure(), data.ErrNotFound) {
 			return nil, errors.Errorf("No backup exists with the id %s", backupID)
 		}
 
-		return nil, errors.Wrap(errs.Err(), "Failed to get backup details in the repository")
+		return nil, errors.Wrap(errs.Failure(), "Failed to get backup details in the repository")
 	}
 
 	sel := utils.IncludeExchangeRestoreDataSelectors(opts)
@@ -288,8 +288,8 @@ func (suite *PreparedBackupExchangeIntegrationSuite) SetupSuite() {
 	require.NoError(t, err, "retrieving recent backup by ID")
 	require.Equal(t, bIDs, string(b.ID), "repo backup matches results id")
 	_, b, errs := suite.repo.BackupDetails(ctx, bIDs)
-	require.NoError(t, errs.Err(), "retrieving recent backup details by ID")
-	require.Empty(t, errs.Errs(), "retrieving recent backup details by ID")
+	require.NoError(t, errs.Failure(), "retrieving recent backup details by ID")
+	require.Empty(t, errs.Recovered(), "retrieving recent backup details by ID")
 	require.Equal(t, bIDs, string(b.ID), "repo details matches results id")
 
 	suite.backupOps[set] = string(b.ID)
@@ -397,8 +397,8 @@ func (suite *PreparedBackupExchangeIntegrationSuite) TestExchangeDetailsCmd() {
 
 	// fetch the details from the repo first
 	deets, _, errs := suite.repo.BackupDetails(ctx, string(bID))
-	require.NoError(t, errs.Err())
-	require.Empty(t, errs.Errs())
+	require.NoError(t, errs.Failure())
+	require.Empty(t, errs.Recovered())
 
 	cmd := tester.StubRootCmd(
 		"backup", "details", "exchange",
@@ -237,8 +237,8 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error {
 
 	bups, ferrs := r.Backups(ctx, bIDs)
 	// TODO: print/log recoverable errors
-	if ferrs.Err() != nil {
-		return Only(ctx, errors.Wrap(ferrs.Err(), "Unable to retrieve backup results from storage"))
+	if ferrs.Failure() != nil {
+		return Only(ctx, errors.Wrap(ferrs.Failure(), "Unable to retrieve backup results from storage"))
 	}
 
 	backup.PrintAll(ctx, bups)
@@ -384,7 +384,7 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error {
 
 // runDetailsOneDriveCmd actually performs the lookup in backup details.
 // the fault.Errors return is always non-nil. Callers should check if
-// errs.Err() == nil.
+// errs.Failure() == nil.
 func runDetailsOneDriveCmd(
 	ctx context.Context,
 	r repository.BackupGetter,
@@ -397,12 +397,12 @@ func runDetailsOneDriveCmd(
 
 	d, _, errs := r.BackupDetails(ctx, backupID)
 	// TODO: log/track recoverable errors
-	if errs.Err() != nil {
-		if errors.Is(errs.Err(), data.ErrNotFound) {
+	if errs.Failure() != nil {
+		if errors.Is(errs.Failure(), data.ErrNotFound) {
 			return nil, errors.Errorf("no backup exists with the id %s", backupID)
 		}
 
-		return nil, errors.Wrap(errs.Err(), "Failed to get backup details in the repository")
+		return nil, errors.Wrap(errs.Failure(), "Failed to get backup details in the repository")
 	}
 
 	sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
@@ -257,8 +257,8 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {
 
 	bups, ferrs := r.Backups(ctx, bIDs)
 	// TODO: print/log recoverable errors
-	if ferrs.Err() != nil {
-		return Only(ctx, errors.Wrap(ferrs.Err(), "Unable to retrieve backup results from storage"))
+	if ferrs.Failure() != nil {
+		return Only(ctx, errors.Wrap(ferrs.Failure(), "Unable to retrieve backup results from storage"))
 	}
 
 	backup.PrintAll(ctx, bups)
@@ -512,7 +512,7 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error {
 
 // runDetailsSharePointCmd actually performs the lookup in backup details.
 // the fault.Errors return is always non-nil. Callers should check if
-// errs.Err() == nil.
+// errs.Failure() == nil.
 func runDetailsSharePointCmd(
 	ctx context.Context,
 	r repository.BackupGetter,
@@ -525,12 +525,12 @@ func runDetailsSharePointCmd(
 
 	d, _, errs := r.BackupDetails(ctx, backupID)
 	// TODO: log/track recoverable errors
-	if errs.Err() != nil {
-		if errors.Is(errs.Err(), data.ErrNotFound) {
+	if errs.Failure() != nil {
+		if errors.Is(errs.Failure(), data.ErrNotFound) {
 			return nil, errors.Errorf("no backup exists with the id %s", backupID)
 		}
 
-		return nil, errors.Wrap(errs.Err(), "Failed to get backup details in the repository")
+		return nil, errors.Wrap(errs.Failure(), "Failed to get backup details in the repository")
 	}
 
 	sel := utils.IncludeSharePointRestoreDataSelectors(opts)
@@ -110,8 +110,8 @@ func (suite *RestoreExchangeIntegrationSuite) SetupSuite() {
 	require.NoError(t, err, "retrieving recent backup by ID")
 
 	_, _, errs := suite.repo.BackupDetails(ctx, string(bop.Results.BackupID))
-	require.NoError(t, errs.Err(), "retrieving recent backup details by ID")
-	require.Empty(t, errs.Errs(), "retrieving recent backup details by ID")
+	require.NoError(t, errs.Failure(), "retrieving recent backup details by ID")
+	require.Empty(t, errs.Recovered(), "retrieving recent backup details by ID")
 	}
 }
 
src/cli/utils/testdata/opts.go (vendored): 4 changes
@@ -501,7 +501,7 @@ func (MockBackupGetter) Backup(
 func (MockBackupGetter) Backups(
 	context.Context,
 	[]model.StableID,
-) ([]*backup.Backup, *fault.Errors) {
+) ([]*backup.Backup, *fault.Bus) {
 	return nil, fault.New(false).Fail(errors.New("unexpected call to mock"))
 }
 
@@ -515,7 +515,7 @@ func (MockBackupGetter) BackupsByTag(
 func (bg *MockBackupGetter) BackupDetails(
 	ctx context.Context,
 	backupID string,
-) (*details.Details, *backup.Backup, *fault.Errors) {
+) (*details.Details, *backup.Backup, *fault.Bus) {
 	if bg == nil {
 		return testdata.GetDetailsSet(), nil, fault.New(true)
 	}
@@ -53,7 +53,7 @@ func generateAndRestoreItems(
 	howMany int,
 	dbf dataBuilderFunc,
 	opts control.Options,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*details.Details, error) {
 	items := make([]item, 0, howMany)
 
@@ -79,7 +79,7 @@ func handleExchangeEmailFactory(cmd *cobra.Command, args []string) error {
 	}
 
 	log := logger.Ctx(ctx)
-	for _, e := range errs.Errs() {
+	for _, e := range errs.Recovered() {
 		log.Errorw(e.Error(), clues.InErr(err).Slice()...)
 	}
 
@@ -126,7 +126,7 @@ func handleExchangeCalendarEventFactory(cmd *cobra.Command, args []string) error
 	}
 
 	log := logger.Ctx(ctx)
-	for _, e := range errs.Errs() {
+	for _, e := range errs.Recovered() {
 		log.Errorw(e.Error(), clues.InErr(err).Slice()...)
 	}
 
@@ -178,7 +178,7 @@ func handleExchangeContactFactory(cmd *cobra.Command, args []string) error {
 	}
 
 	log := logger.Ctx(ctx)
-	for _, e := range errs.Errs() {
+	for _, e := range errs.Recovered() {
 		log.Errorw(e.Error(), clues.InErr(err).Slice()...)
 	}
 
@@ -93,7 +93,7 @@ func runDisplayM365JSON(
 	ctx context.Context,
 	creds account.M365Config,
 	user, itemID string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) error {
 	var (
 		bs []byte
@@ -143,7 +143,7 @@ type itemer interface {
 	GetItem(
 		ctx context.Context,
 		user, itemID string,
-		errs *fault.Errors,
+		errs *fault.Bus,
 	) (serialization.Parsable, *details.ExchangeInfo, error)
 	Serialize(
 		ctx context.Context,
@@ -156,7 +156,7 @@ func getItem(
 	ctx context.Context,
 	itm itemer,
 	user, itemID string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]byte, error) {
 	sp, _, err := itm.GetItem(ctx, user, itemID, errs)
 	if err != nil {
@@ -37,7 +37,7 @@ func (gc *GraphConnector) DataCollections(
 	sels selectors.Selector,
 	metadata []data.RestoreCollection,
 	ctrlOpts control.Options,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]data.BackupCollection, map[string]struct{}, error) {
 	ctx, end := D.Span(ctx, "gc:dataCollections", D.Index("service", sels.Service.String()))
 	defer end()
@@ -198,7 +198,7 @@ func (gc *GraphConnector) RestoreDataCollections(
 	dest control.RestoreDestination,
 	opts control.Options,
 	dcs []data.RestoreCollection,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*details.Details, error) {
 	ctx, end := D.Span(ctx, "connector:restore")
 	defer end()
@@ -86,7 +86,7 @@ func userOptions(fs *string) *users.UsersRequestBuilderGetRequestConfiguration {
 }
 
 // GetAll retrieves all users.
-func (c Users) GetAll(ctx context.Context, errs *fault.Errors) ([]models.Userable, error) {
+func (c Users) GetAll(ctx context.Context, errs *fault.Bus) ([]models.Userable, error) {
 	service, err := c.service()
 	if err != nil {
 		return nil, err
@@ -110,17 +110,17 @@ func (c Users) GetAll(ctx context.Context, errs *fault.Errors) ([]models.Userabl
 
 	var (
 		us = make([]models.Userable, 0)
-		et = errs.Tracker()
+		el = errs.Local()
 	)
 
 	iterator := func(item any) bool {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			return false
 		}
 
 		u, err := validateUser(item)
 		if err != nil {
-			et.Add(clues.Wrap(err, "validating user").WithClues(ctx).With(graph.ErrData(err)...))
+			el.AddRecoverable(clues.Wrap(err, "validating user").WithClues(ctx).With(graph.ErrData(err)...))
 		} else {
 			us = append(us, u)
 		}
@@ -132,7 +132,7 @@ func (c Users) GetAll(ctx context.Context, errs *fault.Errors) ([]models.Userabl
 		return nil, clues.Wrap(err, "iterating all users").WithClues(ctx).With(graph.ErrData(err)...)
 	}
 
-	return us, et.Err()
+	return us, el.Failure()
 }
 
 func (c Users) GetByID(ctx context.Context, userID string) (models.Userable, error) {
@@ -17,7 +17,7 @@ import (
 // ---------------------------------------------------------------------------
 
 type getAller interface {
-	GetAll(context.Context, *fault.Errors) ([]models.Userable, error)
+	GetAll(context.Context, *fault.Bus) ([]models.Userable, error)
 }
 
 type getter interface {
@@ -38,7 +38,7 @@ type getWithInfoer interface {
 // ---------------------------------------------------------------------------
 
 // Users fetches all users in the tenant.
-func Users(ctx context.Context, ga getAller, errs *fault.Errors) ([]models.Userable, error) {
+func Users(ctx context.Context, ga getAller, errs *fault.Bus) ([]models.Userable, error) {
 	return ga.GetAll(ctx, errs)
 }
 
@@ -72,7 +72,7 @@ func (c Contacts) DeleteContainer(
 func (c Contacts) GetItem(
 	ctx context.Context,
 	user, itemID string,
-	_ *fault.Errors, // no attachments to iterate over, so this goes unused
+	_ *fault.Bus, // no attachments to iterate over, so this goes unused
 ) (serialization.Parsable, *details.ExchangeInfo, error) {
 	cont, err := c.stable.Client().UsersById(user).ContactsById(itemID).Get(ctx, nil)
 	if err != nil {
@@ -109,7 +109,7 @@ func (c Contacts) EnumerateContainers(
 	ctx context.Context,
 	userID, baseDirID string,
 	fn func(graph.CacheFolder) error,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) error {
 	service, err := c.service()
 	if err != nil {
@@ -126,14 +126,14 @@ func (c Contacts) EnumerateContainers(
 			With("options_fields", fields)
 	}
 
-	et := errs.Tracker()
+	el := errs.Local()
 	builder := service.Client().
 		UsersById(userID).
 		ContactFoldersById(baseDirID).
 		ChildFolders()
 
 	for {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}
 
@@ -143,12 +143,12 @@ func (c Contacts) EnumerateContainers(
 		}
 
 		for _, fold := range resp.GetValue() {
-			if et.Err() != nil {
-				break
+			if el.Failure() != nil {
+				return el.Failure()
 			}
 
 			if err := checkIDAndName(fold); err != nil {
-				et.Add(clues.Stack(err).
+				el.AddRecoverable(clues.Stack(err).
 					WithClues(ctx).
 					With(graph.ErrData(err)...).
 					Label(fault.LabelForceNoBackupCreation))
@@ -163,7 +163,7 @@ func (c Contacts) EnumerateContainers(
 
 			temp := graph.NewCacheFolder(fold, nil, nil)
 			if err := fn(temp); err != nil {
-				et.Add(clues.Stack(err).
+				el.AddRecoverable(clues.Stack(err).
 					WithClues(fctx).
 					With(graph.ErrData(err)...).
 					Label(fault.LabelForceNoBackupCreation))
@@ -180,7 +180,7 @@ func (c Contacts) EnumerateContainers(
 		builder = users.NewItemContactFoldersItemChildFoldersRequestBuilder(link, service.Adapter())
 	}
 
-	return et.Err()
+	return el.Failure()
 }
 
 // ---------------------------------------------------------------------------
@@ -96,7 +96,7 @@ func (c Events) GetContainerByID(
 func (c Events) GetItem(
 	ctx context.Context,
 	user, itemID string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (serialization.Parsable, *details.ExchangeInfo, error) {
 	var (
 		err error
@@ -141,7 +141,7 @@ func (c Events) EnumerateContainers(
 	ctx context.Context,
 	userID, baseDirID string,
 	fn func(graph.CacheFolder) error,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) error {
 	service, err := c.service()
 	if err != nil {
@@ -153,11 +153,11 @@ func (c Events) EnumerateContainers(
 		return clues.Wrap(err, "setting calendar options").WithClues(ctx).With(graph.ErrData(err)...)
 	}
 
-	et := errs.Tracker()
+	el := errs.Local()
 	builder := service.Client().UsersById(userID).Calendars()
 
 	for {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}
 
@@ -167,13 +167,13 @@ func (c Events) EnumerateContainers(
 		}
 
 		for _, cal := range resp.GetValue() {
-			if et.Err() != nil {
+			if el.Failure() != nil {
 				break
 			}
 
 			cd := CalendarDisplayable{Calendarable: cal}
 			if err := checkIDAndName(cd); err != nil {
-				et.Add(clues.Stack(err).
+				el.AddRecoverable(clues.Stack(err).
 					WithClues(ctx).
 					With(graph.ErrData(err)...).
 					Label(fault.LabelForceNoBackupCreation))
@@ -191,7 +191,7 @@ func (c Events) EnumerateContainers(
 				path.Builder{}.Append(ptr.Val(cd.GetId())), // storage path
 				path.Builder{}.Append(ptr.Val(cd.GetDisplayName()))) // display location
 			if err := fn(temp); err != nil {
-				et.Add(clues.Stack(err).
+				el.AddRecoverable(clues.Stack(err).
 					WithClues(fctx).
 					With(graph.ErrData(err)...).
 					Label(fault.LabelForceNoBackupCreation))
@@ -208,7 +208,7 @@ func (c Events) EnumerateContainers(
 		builder = users.NewItemCalendarsRequestBuilder(link, service.Adapter())
 	}
 
-	return et.Err()
+	return el.Failure()
 }
 
 // ---------------------------------------------------------------------------
@@ -124,7 +124,7 @@ func (c Mail) GetContainerByID(
 func (c Mail) GetItem(
 	ctx context.Context,
 	user, itemID string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (serialization.Parsable, *details.ExchangeInfo, error) {
 	mail, err := c.stable.Client().UsersById(user).MessagesById(itemID).Get(ctx, nil)
 	if err != nil {
@@ -164,21 +164,21 @@ func (c Mail) EnumerateContainers(
 	ctx context.Context,
 	userID, baseDirID string,
 	fn func(graph.CacheFolder) error,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) error {
 	service, err := c.service()
 	if err != nil {
 		return clues.Stack(err).WithClues(ctx).With(graph.ErrData(err)...)
 	}
 
-	et := errs.Tracker()
+	el := errs.Local()
 	builder := service.Client().
 		UsersById(userID).
 		MailFolders().
 		Delta()
 
 	for {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}
 
@@ -188,7 +188,7 @@ func (c Mail) EnumerateContainers(
 		}
 
 		for _, v := range resp.GetValue() {
-			if et.Err() != nil {
+			if el.Failure() != nil {
 				break
 			}
 
@@ -199,7 +199,7 @@ func (c Mail) EnumerateContainers(
 
 			temp := graph.NewCacheFolder(v, nil, nil)
 			if err := fn(temp); err != nil {
-				et.Add(clues.Stack(err).
+				el.AddRecoverable(clues.Stack(err).
 					WithClues(fctx).
 					With(graph.ErrData(err)...).
 					Label(fault.LabelForceNoBackupCreation))
@@ -216,7 +216,7 @@ func (c Mail) EnumerateContainers(
 		builder = users.NewItemMailFoldersDeltaRequestBuilder(link, service.Adapter())
 	}
 
-	return et.Err()
+	return el.Failure()
 }
 
 // ---------------------------------------------------------------------------
@@ -47,7 +47,7 @@ func (cfc *contactFolderCache) populateContactRoot(
 // as of (Oct-07-2022)
 func (cfc *contactFolderCache) Populate(
 	ctx context.Context,
-	errs *fault.Errors,
+	errs *fault.Bus,
 	baseID string,
 	baseContainerPather ...string,
 ) error {
@@ -28,7 +28,7 @@ type containersEnumerator interface {
 		ctx context.Context,
 		userID, baseDirID string,
 		fn func(graph.CacheFolder) error,
-		errs *fault.Errors,
+		errs *fault.Bus,
 	) error
 }
 
@@ -64,7 +64,7 @@ type DeltaPath struct {
 func parseMetadataCollections(
 	ctx context.Context,
 	colls []data.RestoreCollection,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (CatDeltaPaths, error) {
 	// cdp stores metadata
 	cdp := CatDeltaPaths{
@@ -168,7 +168,7 @@ func DataCollections(
 	acct account.M365Config,
 	su support.StatusUpdater,
 	ctrlOpts control.Options,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]data.BackupCollection, map[string]struct{}, error) {
 	eb, err := selector.ToExchangeBackup()
 	if err != nil {
@@ -178,7 +178,7 @@ func DataCollections(
 	var (
 		user        = selector.DiscreteOwner
 		collections = []data.BackupCollection{}
-		et          = errs.Tracker()
+		el          = errs.Local()
 	)
 
 	cdps, err := parseMetadataCollections(ctx, metadata, errs)
@@ -187,7 +187,7 @@ func DataCollections(
 	}
 
 	for _, scope := range eb.Scopes() {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}
 
@@ -201,14 +201,14 @@ func DataCollections(
 			su,
 			errs)
 		if err != nil {
-			et.Add(err)
+			el.AddRecoverable(err)
 			continue
 		}
 
 		collections = append(collections, dcs...)
 	}
 
-	return collections, nil, et.Err()
+	return collections, nil, el.Failure()
 }
 
 func getterByType(ac api.Client, category path.CategoryType) (addedAndRemovedItemIDsGetter, error) {
@@ -235,7 +235,7 @@ func createCollections(
 	dps DeltaPaths,
 	ctrlOpts control.Options,
 	su support.StatusUpdater,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]data.BackupCollection, error) {
 	var (
 		allCollections = make([]data.BackupCollection, 0)
@@ -62,7 +62,7 @@ func (ecc *eventCalendarCache) populateEventRoot(ctx context.Context) error {
 // @param baseID: ignored. Present to conform to interface
 func (ecc *eventCalendarCache) Populate(
 	ctx context.Context,
-	errs *fault.Errors,
+	errs *fault.Bus,
 	baseID string,
 	baseContainerPath ...string,
 ) error {
@@ -45,7 +45,7 @@ type itemer interface {
 	GetItem(
 		ctx context.Context,
 		user, itemID string,
-		errs *fault.Errors,
+		errs *fault.Bus,
 	) (serialization.Parsable, *details.ExchangeInfo, error)
 	Serialize(
 		ctx context.Context,
@@ -127,7 +127,7 @@ func NewCollection(
 
 // Items utility function to asynchronously execute process to fill data channel with
 // M365 exchange objects and returns the data channel
-func (col *Collection) Items(ctx context.Context, errs *fault.Errors) <-chan data.Stream {
+func (col *Collection) Items(ctx context.Context, errs *fault.Bus) <-chan data.Stream {
 	go col.streamItems(ctx, errs)
 	return col.data
 }
@@ -163,7 +163,7 @@ func (col Collection) DoNotMergeItems() bool {
 
 // streamItems is a utility function that uses col.collectionType to be able to serialize
 // all the M365IDs defined in the added field. data channel is closed by this function
-func (col *Collection) streamItems(ctx context.Context, errs *fault.Errors) {
+func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
 	var (
 		success    int64
 		totalBytes int64
@@ -177,7 +177,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Errors) {
 	)
 
 	defer func() {
-		col.finishPopulation(ctx, int(success), totalBytes, errs.Err())
+		col.finishPopulation(ctx, int(success), totalBytes, errs.Failure())
 	}()
 
 	if len(col.added)+len(col.removed) > 0 {
@@ -226,7 +226,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Errors) {
 
 	// add any new items
 	for id := range col.added {
-		if errs.Err() != nil {
+		if errs.Failure() != nil {
 			break
 		}
 
@@ -253,7 +253,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Errors) {
 				atomic.AddInt64(&success, 1)
 				log.With("err", err).Infow("item not found", clues.InErr(err).Slice()...)
 			} else {
-				errs.Add(clues.Wrap(err, "fetching item").Label(fault.LabelForceNoBackupCreation))
+				errs.AddRecoverable(clues.Wrap(err, "fetching item").Label(fault.LabelForceNoBackupCreation))
 			}
 
 			return
@@ -261,7 +261,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Errors) {
 
 		data, err := col.items.Serialize(ctx, item, user, id)
 		if err != nil {
-			errs.Add(clues.Wrap(err, "serializing item").Label(fault.LabelForceNoBackupCreation))
+			errs.AddRecoverable(clues.Wrap(err, "serializing item").Label(fault.LabelForceNoBackupCreation))
 			return
 		}
 
@@ -291,7 +291,7 @@ func getItemWithRetries(
 	ctx context.Context,
 	userID, itemID string,
 	items itemer,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (serialization.Parsable, *details.ExchangeInfo, error) {
 	item, info, err := items.GetItem(ctx, userID, itemID, errs)
 	if err != nil {
@@ -30,7 +30,7 @@ type mockItemer struct {
 func (mi *mockItemer) GetItem(
 	context.Context,
 	string, string,
-	*fault.Errors,
+	*fault.Bus,
 ) (serialization.Parsable, *details.ExchangeInfo, error) {
 	mi.getCount++
 	return nil, nil, mi.getErr
@@ -72,7 +72,7 @@ func (mc *mailFolderCache) populateMailRoot(ctx context.Context) error {
 // for the base container in the cache.
 func (mc *mailFolderCache) Populate(
 	ctx context.Context,
-	errs *fault.Errors,
+	errs *fault.Bus,
 	baseID string,
 	baseContainerPath ...string,
 ) error {
@@ -36,7 +36,7 @@ func createService(credentials account.M365Config) (*graph.Service, error) {
 func PopulateExchangeContainerResolver(
 	ctx context.Context,
 	qp graph.QueryParams,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (graph.ContainerResolver, error) {
 	var (
 		res graph.ContainerResolver
@@ -39,7 +39,7 @@ func filterContainersAndFillCollections(
 	scope selectors.ExchangeScope,
 	dps DeltaPaths,
 	ctrlOpts control.Options,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) error {
 	var (
 		// folder ID -> delta url or folder path lookups
@@ -68,11 +68,11 @@ func filterContainersAndFillCollections(
 		return err
 	}
 
-	et := errs.Tracker()
+	el := errs.Local()
 
 	for _, c := range resolver.Items() {
-		if et.Err() != nil {
-			return et.Err()
+		if el.Failure() != nil {
+			return el.Failure()
 		}
 
 		cID := *c.GetId()
@@ -102,7 +102,7 @@ func filterContainersAndFillCollections(
 		added, removed, newDelta, err := getter.GetAddedAndRemovedItemIDs(ctx, qp.ResourceOwner, cID, prevDelta)
 		if err != nil {
 			if !graph.IsErrDeletedInFlight(err) {
-				et.Add(clues.Stack(err).Label(fault.LabelForceNoBackupCreation))
+				el.AddRecoverable(clues.Stack(err).Label(fault.LabelForceNoBackupCreation))
 				continue
 			}
 
@@ -157,12 +157,12 @@ func filterContainersAndFillCollections(
 	// in the `previousPath` set, but does not exist in the current container
 	// resolver (which contains all the resource owners' current containers).
 	for id, p := range tombstones {
-		if et.Err() != nil {
-			return et.Err()
+		if el.Failure() != nil {
+			return el.Failure()
 		}
 
 		if collections[id] != nil {
-			et.Add(clues.Wrap(err, "conflict: tombstone exists for a live collection").WithClues(ctx))
+			el.AddRecoverable(clues.Wrap(err, "conflict: tombstone exists for a live collection").WithClues(ctx))
 			continue
 		}
 
@@ -218,7 +218,7 @@ func filterContainersAndFillCollections(
 
 	collections["metadata"] = col
 
-	return et.Err()
+	return el.Failure()
 }
 
 // produces a set of id:path pairs from the deltapaths map.
@@ -91,8 +91,8 @@ func (m mockResolver) DestinationNameToID(dest string) string { return m.added[d
 func (m mockResolver) IDToPath(context.Context, string, bool) (*path.Builder, *path.Builder, error) {
 	return nil, nil, nil
 }
 func (m mockResolver) PathInCache(string) (string, bool) { return "", false }
-func (m mockResolver) Populate(context.Context, *fault.Errors, string, ...string) error { return nil }
+func (m mockResolver) Populate(context.Context, *fault.Bus, string, ...string) error { return nil }
 
 // ---------------------------------------------------------------------------
 // tests
@@ -37,7 +37,7 @@ func RestoreExchangeObject(
 	policy control.CollisionPolicy,
 	service graph.Servicer,
 	destination, user string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*details.ExchangeInfo, error) {
 	if policy != control.Copy {
 		return nil, clues.Wrap(clues.New(policy.String()), "policy not supported for Exchange restore").WithClues(ctx)
@@ -102,7 +102,7 @@ func RestoreExchangeEvent(
 	service graph.Servicer,
 	cp control.CollisionPolicy,
 	destination, user string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*details.ExchangeInfo, error) {
 	event, err := support.CreateEventFromBytes(bits)
 	if err != nil {
@@ -112,7 +112,7 @@ func RestoreExchangeEvent(
 	ctx = clues.Add(ctx, "item_id", ptr.Val(event.GetId()))
 
 	var (
-		et               = errs.Tracker()
+		el               = errs.Local()
 		transformedEvent = support.ToEventSimplified(event)
 		attached         []models.Attachmentable
 	)
@@ -140,19 +140,19 @@ func RestoreExchangeEvent(
 	}
 
 	for _, attach := range attached {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}
 
 		if err := uploadAttachment(ctx, uploader, attach); err != nil {
-			et.Add(err)
+			el.AddRecoverable(err)
 		}
 	}
 
 	info := api.EventInfo(event)
 	info.Size = int64(len(bits))
 
-	return info, et.Err()
+	return info, el.Failure()
 }
 
 // RestoreMailMessage utility function to place an exchange.Mail
@@ -167,7 +167,7 @@ func RestoreMailMessage(
 	service graph.Servicer,
 	cp control.CollisionPolicy,
 	destination, user string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*details.ExchangeInfo, error) {
 	// Creates messageable object from original bytes
 	originalMessage, err := support.CreateMessageFromBytes(bits)
@@ -240,7 +240,7 @@ func SendMailToBackStore(
 	service graph.Servicer,
 	user, destination string,
 	message models.Messageable,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) error {
 	attached := message.GetAttachments()
 
@@ -257,7 +257,7 @@ func SendMailToBackStore(
 	}
 
 	var (
-		et       = errs.Tracker()
+		el       = errs.Local()
 		id       = ptr.Val(response.GetId())
 		uploader = &mailAttachmentUploader{
 			userID: user,
@@ -268,7 +268,7 @@ func SendMailToBackStore(
 	)
 
 	for _, attachment := range attached {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}
 
@@ -283,13 +283,13 @@ func SendMailToBackStore(
 				continue
 			}
 
-			et.Add(errors.Wrap(err, "uploading mail attachment"))
+			el.AddRecoverable(errors.Wrap(err, "uploading mail attachment"))
 
 			break
 		}
 	}
 
-	return et.Err()
+	return el.Failure()
 }
 
 // RestoreExchangeDataCollections restores M365 objects in data.RestoreCollection to MSFT
@@ -302,7 +302,7 @@ func RestoreExchangeDataCollections(
 	dest control.RestoreDestination,
 	dcs []data.RestoreCollection,
 	deets *details.Builder,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*support.ConnectorOperationStatus, error) {
 	var (
 		directoryCaches = make(map[string]map[path.CategoryType]graph.ContainerResolver)
@@ -310,7 +310,7 @@ func RestoreExchangeDataCollections(
 		userID string
 		// TODO policy to be updated from external source after completion of refactoring
 		policy = control.Copy
-		et     = errs.Tracker()
+		el     = errs.Local()
 	)
 
 	if len(dcs) > 0 {
@@ -319,7 +319,7 @@ func RestoreExchangeDataCollections(
 	}
 
 	for _, dc := range dcs {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}
 
@@ -337,7 +337,7 @@ func RestoreExchangeDataCollections(
 			userCaches,
 			errs)
 		if err != nil {
-			et.Add(clues.Wrap(err, "creating destination").WithClues(ctx))
+			el.AddRecoverable(clues.Wrap(err, "creating destination").WithClues(ctx))
 			continue
 		}
 
@@ -355,10 +355,10 @@ func RestoreExchangeDataCollections(
 		support.Restore,
 		len(dcs),
 		metrics,
-		et.Err(),
+		el.Failure(),
 		dest.ContainerName)
 
-	return status, et.Err()
+	return status, el.Failure()
 }
 
 // restoreCollection handles restoration of an individual collection.
@@ -369,7 +369,7 @@ func restoreCollection(
 	folderID string,
 	policy control.CollisionPolicy,
 	deets *details.Builder,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (support.CollectionMetrics, bool) {
 	ctx, end := D.Span(ctx, "gc:exchange:restoreCollection", D.Label("path", dc.FullPath()))
 	defer end()
@@ -400,11 +400,11 @@ func restoreCollection(
 	for {
 		select {
 		case <-ctx.Done():
-			errs.Add(clues.Wrap(ctx.Err(), "context cancelled").WithClues(ctx))
+			errs.AddRecoverable(clues.Wrap(ctx.Err(), "context cancelled").WithClues(ctx))
 			return metrics, true
 
 		case itemData, ok := <-items:
-			if !ok || errs.Err() != nil {
+			if !ok || errs.Failure() != nil {
 				return metrics, false
 			}
 
@@ -416,7 +416,7 @@ func restoreCollection(
 
 			_, err := buf.ReadFrom(itemData.ToReader())
 			if err != nil {
-				errs.Add(clues.Wrap(err, "reading item bytes").WithClues(ictx))
+				errs.AddRecoverable(clues.Wrap(err, "reading item bytes").WithClues(ictx))
 				continue
 			}
 
@@ -432,7 +432,7 @@ func restoreCollection(
 				user,
 				errs)
 			if err != nil {
-				errs.Add(err)
+				errs.AddRecoverable(err)
 				continue
 			}
 
@@ -441,7 +441,7 @@ func restoreCollection(
 
 			itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
 			if err != nil {
-				errs.Add(clues.Wrap(err, "building full path with item").WithClues(ctx))
+				errs.AddRecoverable(clues.Wrap(err, "building full path with item").WithClues(ctx))
 				continue
 			}
 
@@ -476,7 +476,7 @@ func CreateContainerDestination(
 	directory path.Path,
 	destination string,
 	caches map[path.CategoryType]graph.ContainerResolver,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (string, error) {
 	var (
 		newCache = false
@@ -589,7 +589,7 @@ func establishMailRestoreLocation(
 	mfc graph.ContainerResolver,
 	user string,
 	isNewCache bool,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (string, error) {
 	// Process starts with the root folder in order to recreate
 	// the top-level folder with the same tactic
@@ -648,7 +648,7 @@ func establishContactsRestoreLocation(
 	cfc graph.ContainerResolver,
 	user string,
 	isNewCache bool,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (string, error) {
 	cached, ok := cfc.PathInCache(folders[0])
 	if ok {
@@ -684,7 +684,7 @@ func establishEventsRestoreLocation(
 	ecc graph.ContainerResolver, // eventCalendarCache
 	user string,
 	isNewCache bool,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (string, error) {
 	// Need to prefix with the "Other Calendars" folder so lookup happens properly.
 	cached, ok := ecc.PathInCache(folders[0])
@@ -65,7 +65,7 @@ type ContainerResolver interface {
 	// @param ctx is necessary param for Graph API tracing
 	// @param baseFolderID represents the M365ID base that the resolver will
 	// conclude its search. Default input is "".
-	Populate(ctx context.Context, errs *fault.Errors, baseFolderID string, baseContainerPather ...string) error
+	Populate(ctx context.Context, errs *fault.Bus, baseFolderID string, baseContainerPather ...string) error
 
 	// PathInCache performs a look up of a path reprensentation
 	// and returns the m365ID of directory iff the pathString
@@ -134,7 +134,7 @@ func (md MetadataCollection) DoNotMergeItems() bool {
 
 func (md MetadataCollection) Items(
 	ctx context.Context,
-	_ *fault.Errors, // not used, just here for interface compliance
+	_ *fault.Bus, // not used, just here for interface compliance
 ) <-chan data.Stream {
 	res := make(chan data.Stream)
 
@@ -68,7 +68,7 @@ func NewGraphConnector(
 	itemClient *http.Client,
 	acct account.Account,
 	r resource,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*GraphConnector, error) {
 	m365, err := acct.M365Config()
 	if err != nil {
@@ -129,7 +129,7 @@ func (gc *GraphConnector) createService() (*graph.Service, error) {
 // setTenantUsers queries the M365 to identify the users in the
 // workspace. The users field is updated during this method
 // iff the returned error is nil
-func (gc *GraphConnector) setTenantUsers(ctx context.Context, errs *fault.Errors) error {
+func (gc *GraphConnector) setTenantUsers(ctx context.Context, errs *fault.Bus) error {
 	ctx, end := D.Span(ctx, "gc:setTenantUsers")
 	defer end()
 
@@ -150,7 +150,7 @@ func (gc *GraphConnector) setTenantUsers(ctx context.Context, errs *fault.Errors
 // setTenantSites queries the M365 to identify the sites in the
 // workspace. The sites field is updated during this method
 // iff the returned error is nil.
-func (gc *GraphConnector) setTenantSites(ctx context.Context, errs *fault.Errors) error {
+func (gc *GraphConnector) setTenantSites(ctx context.Context, errs *fault.Bus) error {
 	gc.Sites = map[string]string{}
 
 	ctx, end := D.Span(ctx, "gc:setTenantSites")
@@ -222,7 +222,7 @@ func (gc *GraphConnector) GetSiteIDs() []string {
 func (gc *GraphConnector) UnionSiteIDsAndWebURLs(
 	ctx context.Context,
 	ids, urls []string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]string, error) {
 	if len(gc.Sites) == 0 {
 		if err := gc.setTenantSites(ctx, errs); err != nil {
@ -308,7 +308,7 @@ func getResources(
|
|||||||
query func(context.Context, graph.Servicer) (serialization.Parsable, error),
|
query func(context.Context, graph.Servicer) (serialization.Parsable, error),
|
||||||
parser func(parseNode serialization.ParseNode) (serialization.Parsable, error),
|
parser func(parseNode serialization.ParseNode) (serialization.Parsable, error),
|
||||||
identify func(any) (string, string, error),
|
identify func(any) (string, string, error),
|
||||||
errs *fault.Errors,
|
errs *fault.Bus,
|
||||||
) (map[string]string, error) {
|
) (map[string]string, error) {
|
||||||
resources := map[string]string{}
|
resources := map[string]string{}
|
||||||
|
|
||||||
@ -324,17 +324,17 @@ func getResources(
|
|||||||
return nil, clues.Stack(err).WithClues(ctx).With(graph.ErrData(err)...)
|
return nil, clues.Stack(err).WithClues(ctx).With(graph.ErrData(err)...)
|
||||||
}
|
}
|
||||||
|
|
||||||
et := errs.Tracker()
|
el := errs.Local()
|
||||||
|
|
||||||
callbackFunc := func(item any) bool {
|
callbackFunc := func(item any) bool {
|
||||||
if et.Err() != nil {
|
if el.Failure() != nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
k, v, err := identify(item)
|
k, v, err := identify(item)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if !errors.Is(err, errKnownSkippableCase) {
|
if !errors.Is(err, errKnownSkippableCase) {
|
||||||
et.Add(clues.Stack(err).
|
el.AddRecoverable(clues.Stack(err).
|
||||||
WithClues(ctx).
|
WithClues(ctx).
|
||||||
With("query_url", gs.Adapter().GetBaseUrl()))
|
With("query_url", gs.Adapter().GetBaseUrl()))
|
||||||
}
|
}
|
||||||
@ -351,5 +351,5 @@ func getResources(
|
|||||||
return nil, clues.Stack(err).WithClues(ctx).With(graph.ErrData(err)...)
|
return nil, clues.Stack(err).WithClues(ctx).With(graph.ErrData(err)...)
|
||||||
}
|
}
|
||||||
|
|
||||||
return resources, et.Err()
|
return resources, el.Failure()
|
||||||
}
|
}
|
||||||
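The getResources hunks above show the renaming recipe this commit applies throughout: `errs.Tracker()` becomes `errs.Local()`, `et.Add(...)` becomes `el.AddRecoverable(...)`, and `et.Err()` becomes `el.Failure()`. A minimal sketch of the resulting loop shape follows; the import path, the `process` helper, and the `items` slice are assumptions for illustration, not part of the commit.

```go
package example

import (
	"github.com/alcionai/corso/src/pkg/fault" // import path assumed
)

// process is a hypothetical per-item worker, standing in for the real work.
func process(item string) error { return nil }

func collectAll(items []string, errs *fault.Bus) error {
	// Local() returns a view of the bus scoped to this call, so the loop
	// can bail out on its own failure without tearing down the caller.
	el := errs.Local()

	for _, item := range items {
		// stop early once a prior error has escalated to a failure
		if el.Failure() != nil {
			break
		}

		if err := process(item); err != nil {
			// record the error as recoverable and continue with the next item
			el.AddRecoverable(err)
		}
	}

	// non-nil only if the run should be considered failed
	return el.Failure()
}
```

This matches the shape of nearly every loop touched below: take a local view, check `Failure()` at the top of the loop, record per-item errors with `AddRecoverable`, and return `Failure()` at the end.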
@@ -111,7 +111,7 @@ func (medc MockExchangeDataCollection) DoNotMergeItems() bool { return med
 // channel is closed when there are no more items available.
 func (medc *MockExchangeDataCollection) Items(
 	ctx context.Context,
-	_ *fault.Errors, // unused
+	_ *fault.Bus, // unused
 ) <-chan data.Stream {
 	res := make(chan data.Stream)

@@ -48,7 +48,7 @@ func (mlc *MockListCollection) PreviousPath() path.Path {

 func (mlc *MockListCollection) Items(
 	ctx context.Context,
-	_ *fault.Errors, // unused
+	_ *fault.Bus, // unused
 ) <-chan data.Stream {
 	res := make(chan data.Stream)
@@ -165,7 +165,7 @@ func (oc *Collection) IsEmpty() bool {
 // Items() returns the channel containing M365 Exchange objects
 func (oc *Collection) Items(
 	ctx context.Context,
-	errs *fault.Errors, // TODO: currently unused while onedrive isn't up to date with clues/fault
+	errs *fault.Bus, // TODO: currently unused while onedrive isn't up to date with clues/fault
 ) <-chan data.Stream {
 	go oc.populateItems(ctx, errs)
 	return oc.data

@@ -241,7 +241,7 @@ func (od *Item) ModTime() time.Time {

 // populateItems iterates through items added to the collection
 // and uses the collection `itemReader` to read the item
-func (oc *Collection) populateItems(ctx context.Context, errs *fault.Errors) {
+func (oc *Collection) populateItems(ctx context.Context, errs *fault.Bus) {
 	var (
 		byteCount int64
 		itemsRead int64

@@ -249,7 +249,7 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Errors) {
 		itemsFound int64
 		dirsFound  int64
 		wg         sync.WaitGroup
-		et         = errs.Tracker()
+		el         = errs.Local()
 	)

 	// Retrieve the OneDrive folder path to set later in

@@ -272,7 +272,7 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Errors) {
 	defer close(semaphoreCh)

 	for _, item := range oc.driveItems {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}

@@ -323,7 +323,7 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Errors) {
 				oc.ctrl.ToggleFeatures.EnablePermissionsBackup)

 			if err != nil {
-				et.Add(clues.Wrap(err, "getting item metadata").Label(fault.LabelForceNoBackupCreation))
+				el.AddRecoverable(clues.Wrap(err, "getting item metadata").Label(fault.LabelForceNoBackupCreation))
 				return
 			}
 		}

@@ -370,7 +370,7 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Errors) {

 			// check for errors following retries
 			if err != nil {
-				et.Add(clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
+				el.AddRecoverable(clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
 				return nil, err
 			}

@@ -443,7 +443,7 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Errors) {

 	wg.Wait()

-	oc.reportAsCompleted(ctx, int(itemsFound), int(itemsRead), byteCount, et.Err())
+	oc.reportAsCompleted(ctx, int(itemsFound), int(itemsRead), byteCount, el.Failure())
 }

 func (oc *Collection) reportAsCompleted(ctx context.Context, itemsFound, itemsRead int, byteCount int64, err error) {
@@ -119,7 +119,7 @@ func NewCollections(
 func deserializeMetadata(
 	ctx context.Context,
 	cols []data.RestoreCollection,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (map[string]string, map[string]map[string]string, error) {
 	logger.Ctx(ctx).Infow(
 		"deserialzing previous backup metadata",

@@ -128,11 +128,11 @@ func deserializeMetadata(
 	var (
 		prevDeltas  = map[string]string{}
 		prevFolders = map[string]map[string]string{}
-		et          = errs.Tracker()
+		el          = errs.Local()
 	)

 	for _, col := range cols {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}

@@ -186,7 +186,7 @@ func deserializeMetadata(

 			err = clues.Stack(err).WithClues(ictx)

-			et.Add(err)
+			el.AddRecoverable(err)
 			logger.Ctx(ictx).
 				With("err", err).
 				Errorw("deserializing base backup metadata", clues.InErr(err).Slice()...)

@@ -216,7 +216,7 @@ func deserializeMetadata(
 		}
 	}

-	return prevDeltas, prevFolders, et.Err()
+	return prevDeltas, prevFolders, el.Failure()
 }

 var errExistingMapping = clues.New("mapping already exists for same drive ID")

@@ -257,7 +257,7 @@ func deserializeMap[T any](reader io.ReadCloser, alreadyFound map[string]T) erro
 func (c *Collections) Get(
 	ctx context.Context,
 	prevMetadata []data.RestoreCollection,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]data.BackupCollection, map[string]struct{}, error) {
 	prevDeltas, oldPathsByDriveID, err := deserializeMetadata(ctx, prevMetadata, errs)
 	if err != nil {

@@ -599,12 +599,12 @@ func (c *Collections) UpdateCollections(
 	excluded map[string]struct{},
 	itemCollection map[string]string,
 	invalidPrevDelta bool,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) error {
-	et := errs.Tracker()
+	el := errs.Local()

 	for _, item := range items {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}

@@ -632,7 +632,9 @@ func (c *Collections) UpdateCollections(

 		collectionPath, err := c.getCollectionPath(driveID, item)
 		if err != nil {
-			return clues.Stack(err).WithClues(ictx)
+			el.AddRecoverable(clues.Stack(err).
+				WithClues(ictx).
+				Label(fault.LabelForceNoBackupCreation))
 		}

 		// Skip items that don't match the folder selectors we were given.

@@ -650,7 +652,7 @@ func (c *Collections) UpdateCollections(
 			if ok {
 				prevPath, err = path.FromDataLayerPath(prevPathStr, false)
 				if err != nil {
-					et.Add(clues.Wrap(err, "invalid previous path").
+					el.AddRecoverable(clues.Wrap(err, "invalid previous path").
 						WithClues(ictx).
 						With("path_string", prevPathStr))
 				}

@@ -754,7 +756,7 @@ func (c *Collections) UpdateCollections(
 		}
 	}

-	return et.Err()
+	return el.Failure()
 }

 func shouldSkipDrive(ctx context.Context, drivePath path.Path, m folderMatcher, driveName string) bool {

@@ -2046,7 +2046,7 @@ func (suite *OneDriveCollectionsSuite) TestCollectItems() {
 		excluded map[string]struct{},
 		itemCollection map[string]string,
 		doNotMergeItems bool,
-		errs *fault.Errors,
+		errs *fault.Bus,
 	) error {
 		return nil
 	}
@@ -38,7 +38,7 @@ func DataCollections(
 	service graph.Servicer,
 	su support.StatusUpdater,
 	ctrlOpts control.Options,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]data.BackupCollection, map[string]struct{}, error) {
 	odb, err := selector.ToOneDriveBackup()
 	if err != nil {

@@ -46,7 +46,7 @@ func DataCollections(
 	}

 	var (
-		et          = errs.Tracker()
+		el          = errs.Local()
 		user        = selector.DiscreteOwner
 		collections = []data.BackupCollection{}
 		allExcludes = map[string]struct{}{}

@@ -54,7 +54,7 @@ func DataCollections(

 	// for each scope that includes oneDrive items, get all
 	for _, scope := range odb.Scopes() {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}

@@ -72,7 +72,7 @@ func DataCollections(

 		odcs, excludes, err := nc.Get(ctx, metadata, errs)
 		if err != nil {
-			et.Add(clues.Stack(err).Label(fault.LabelForceNoBackupCreation))
+			el.AddRecoverable(clues.Stack(err).Label(fault.LabelForceNoBackupCreation))
 		}

 		collections = append(collections, odcs...)

@@ -80,5 +80,5 @@ func DataCollections(
 		maps.Copy(allExcludes, excludes)
 	}

-	return collections, allExcludes, et.Err()
+	return collections, allExcludes, el.Failure()
 }

@@ -150,7 +150,7 @@ type itemCollector func(
 	excluded map[string]struct{},
 	fileCollectionMap map[string]string,
 	validPrevDelta bool,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) error

 type itemPager interface {

@@ -196,7 +196,7 @@ func collectItems(
 	collector itemCollector,
 	oldPaths map[string]string,
 	prevDelta string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (DeltaUpdate, map[string]string, map[string]struct{}, error) {
 	var (
 		newDeltaURL = ""

@@ -360,7 +360,7 @@ func GetAllFolders(
 	gs graph.Servicer,
 	pager drivePager,
 	prefix string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]*Displayable, error) {
 	drives, err := drives(ctx, pager, true)
 	if err != nil {

@@ -369,11 +369,11 @@ func GetAllFolders(

 	var (
 		folders = map[string]*Displayable{}
-		et      = errs.Tracker()
+		el      = errs.Local()
 	)

 	for _, d := range drives {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}

@@ -392,7 +392,7 @@ func GetAllFolders(
 		excluded map[string]struct{},
 		itemCollection map[string]string,
 		doNotMergeItems bool,
-		errs *fault.Errors,
+		errs *fault.Bus,
 	) error {
 		for _, item := range items {
 			// Skip the root item.

@@ -425,7 +425,7 @@ func GetAllFolders(

 		_, _, _, err = collectItems(ictx, defaultItemPager(gs, id, ""), id, name, collector, map[string]string{}, "", errs)
 		if err != nil {
-			et.Add(clues.Wrap(err, "enumerating items in drive"))
+			el.AddRecoverable(clues.Wrap(err, "enumerating items in drive"))
 		}
 	}

@@ -435,7 +435,7 @@ func GetAllFolders(
 		res = append(res, f)
 	}

-	return res, et.Err()
+	return res, el.Failure()
 }

 func DeleteItem(

@@ -75,7 +75,7 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
 		excluded map[string]struct{},
 		itemCollection map[string]string,
 		doNotMergeItems bool,
-		errs *fault.Errors,
+		errs *fault.Bus,
 	) error {
 		for _, item := range items {
 			if item.GetFile() != nil {
@@ -39,7 +39,7 @@ func RestoreCollections(
 	opts control.Options,
 	dcs []data.RestoreCollection,
 	deets *details.Builder,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*support.ConnectorOperationStatus, error) {
 	var (
 		restoreMetrics support.CollectionMetrics

@@ -63,13 +63,13 @@ func RestoreCollections(
 	})

 	var (
-		et                = errs.Tracker()
+		el                = errs.Local()
 		parentPermissions = map[string][]UserPermission{}
 	)

 	// Iterate through the data collections and restore the contents of each
 	for _, dc := range dcs {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}

@@ -95,7 +95,7 @@ func RestoreCollections(
 			opts.RestorePermissions,
 			errs)
 		if err != nil {
-			et.Add(err)
+			el.AddRecoverable(err)
 		}

 		for k, v := range folderPerms {

@@ -114,10 +114,10 @@ func RestoreCollections(
 		support.Restore,
 		len(dcs),
 		restoreMetrics,
-		et.Err(),
+		el.Failure(),
 		dest.ContainerName)

-	return status, et.Err()
+	return status, el.Failure()
 }

 // RestoreCollection handles restoration of an individual collection.

@@ -135,7 +135,7 @@ func RestoreCollection(
 	deets *details.Builder,
 	permissionIDMappings map[string]string,
 	restorePerms bool,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (support.CollectionMetrics, map[string][]UserPermission, map[string]string, error) {
 	var (
 		metrics = support.CollectionMetrics{}

@@ -195,12 +195,12 @@ func RestoreCollection(
 	}

 	var (
-		et    = errs.Tracker()
+		el    = errs.Local()
 		items = dc.Items(ctx, errs)
 	)

 	for {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}

@@ -215,7 +215,7 @@ func RestoreCollection(

 			itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
 			if err != nil {
-				et.Add(clues.Wrap(err, "appending item to full path").WithClues(ctx))
+				el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ctx))
 				continue
 			}

@@ -262,7 +262,7 @@ func RestoreCollection(
 			}

 			if err != nil {
-				et.Add(err)
+				el.AddRecoverable(err)
 				continue
 			}

@@ -292,7 +292,7 @@ func RestoreCollection(

 				meta, err := getMetadata(metaReader)
 				if err != nil {
-					et.Add(clues.Wrap(err, "getting directory metadata").WithClues(ctx))
+					el.AddRecoverable(clues.Wrap(err, "getting directory metadata").WithClues(ctx))
 					continue
 				}

@@ -315,7 +315,7 @@ func RestoreCollection(
 				copyBuffer,
 				source)
 			if err != nil {
-				et.Add(err)
+				el.AddRecoverable(err)
 				continue
 			}

@@ -331,7 +331,7 @@ func RestoreCollection(
 		}
 	}

-	return metrics, folderPerms, permissionIDMappings, et.Err()
+	return metrics, folderPerms, permissionIDMappings, el.Failure()
 }

 type fileFetcher interface {
@@ -30,13 +30,13 @@ func GetSitePages(
 	serv *discover.BetaService,
 	siteID string,
 	pages []string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]models.SitePageable, error) {
 	var (
 		col         = make([]models.SitePageable, 0)
 		semaphoreCh = make(chan struct{}, fetchChannelSize)
 		opts        = retrieveSitePageOptions()
-		et          = errs.Tracker()
+		el          = errs.Local()
 		wg          sync.WaitGroup
 		m           sync.Mutex
 	)

@@ -51,7 +51,7 @@ func GetSitePages(
 	}

 	for _, entry := range pages {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}

@@ -70,7 +70,7 @@ func GetSitePages(

 			page, err = serv.Client().SitesById(siteID).PagesById(pageID).Get(ctx, opts)
 			if err != nil {
-				et.Add(clues.Wrap(err, "fetching page").WithClues(ctx).With(graph.ErrData(err)...))
+				el.AddRecoverable(clues.Wrap(err, "fetching page").WithClues(ctx).With(graph.ErrData(err)...))
 				return
 			}

@@ -80,7 +80,7 @@ func GetSitePages(

 	wg.Wait()

-	return col, et.Err()
+	return col, el.Failure()
 }

 // GetSite returns a minimal Site with the SiteID and the WebURL

@@ -112,7 +112,7 @@ func (sc Collection) DoNotMergeItems() bool {

 func (sc *Collection) Items(
 	ctx context.Context,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) <-chan data.Stream {
 	go sc.populate(ctx, errs)
 	return sc.data

@@ -183,7 +183,7 @@ func (sc *Collection) finishPopulation(
 }

 // populate utility function to retrieve data from back store for a given collection
-func (sc *Collection) populate(ctx context.Context, errs *fault.Errors) {
+func (sc *Collection) populate(ctx context.Context, errs *fault.Bus) {
 	var (
 		metrics numMetrics
 		writer  = kw.NewJsonSerializationWriter()

@@ -221,11 +221,11 @@ func (sc *Collection) retrieveLists(
 	ctx context.Context,
 	wtr *kw.JsonSerializationWriter,
 	progress chan<- struct{},
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (numMetrics, error) {
 	var (
 		metrics numMetrics
-		et      = errs.Tracker()
+		el      = errs.Local()
 	)

 	lists, err := loadSiteLists(ctx, sc.service, sc.fullPath.ResourceOwner(), sc.jobs, errs)

@@ -237,13 +237,13 @@ func (sc *Collection) retrieveLists(
 	// For each models.Listable, object is serialized and the metrics are collected.
 	// The progress is objected via the passed in channel.
 	for _, lst := range lists {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}

 		byteArray, err := serializeContent(wtr, lst)
 		if err != nil {
-			et.Add(clues.Wrap(err, "serializing list").WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
+			el.AddRecoverable(clues.Wrap(err, "serializing list").WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
 			continue
 		}

@@ -269,18 +269,18 @@ func (sc *Collection) retrieveLists(
 		}
 	}

-	return metrics, et.Err()
+	return metrics, el.Failure()
 }

 func (sc *Collection) retrievePages(
 	ctx context.Context,
 	wtr *kw.JsonSerializationWriter,
 	progress chan<- struct{},
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (numMetrics, error) {
 	var (
 		metrics numMetrics
-		et      = errs.Tracker()
+		el      = errs.Local()
 	)

 	betaService := sc.betaService

@@ -305,13 +305,13 @@ func (sc *Collection) retrievePages(
 	// Pageable objects are not supported in v1.0 of msgraph at this time.
 	// TODO: Verify Parsable interface supported with modified-Pageable
 	for _, pg := range pages {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}

 		byteArray, err := serializeContent(wtr, pg)
 		if err != nil {
-			et.Add(clues.Wrap(err, "serializing page").WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
+			el.AddRecoverable(clues.Wrap(err, "serializing page").WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
 			continue
 		}

@@ -331,7 +331,7 @@ func (sc *Collection) retrievePages(
 		}
 	}

-	return metrics, et.Err()
+	return metrics, el.Failure()
 }

 func serializeContent(writer *kw.JsonSerializationWriter, obj absser.Parsable) ([]byte, error) {
@@ -36,7 +36,7 @@ func DataCollections(
 	serv graph.Servicer,
 	su statusUpdater,
 	ctrlOpts control.Options,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]data.BackupCollection, map[string]struct{}, error) {
 	b, err := selector.ToSharePointBackup()
 	if err != nil {

@@ -44,13 +44,13 @@ func DataCollections(
 	}

 	var (
-		et          = errs.Tracker()
+		el          = errs.Local()
 		site        = b.DiscreteOwner
 		collections = []data.BackupCollection{}
 	)

 	for _, scope := range b.Scopes() {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}

@@ -74,7 +74,7 @@ func DataCollections(
 				ctrlOpts,
 				errs)
 			if err != nil {
-				et.Add(err)
+				el.AddRecoverable(err)
 				continue
 			}

@@ -90,7 +90,7 @@ func DataCollections(
 				ctrlOpts,
 				errs)
 			if err != nil {
-				et.Add(err)
+				el.AddRecoverable(err)
 				continue
 			}

@@ -104,7 +104,7 @@ func DataCollections(
 				ctrlOpts,
 				errs)
 			if err != nil {
-				et.Add(err)
+				el.AddRecoverable(err)
 				continue
 			}
 		}

@@ -113,7 +113,7 @@ func DataCollections(
 		foldersComplete <- struct{}{}
 	}

-	return collections, nil, et.Err()
+	return collections, nil, el.Failure()
 }

 func collectLists(

@@ -122,12 +122,12 @@ func collectLists(
 	tenantID, siteID string,
 	updater statusUpdater,
 	ctrlOpts control.Options,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]data.BackupCollection, error) {
 	logger.Ctx(ctx).With("site", siteID).Debug("Creating SharePoint List Collections")

 	var (
-		et   = errs.Tracker()
+		el   = errs.Local()
 		spcs = make([]data.BackupCollection, 0)
 	)

@@ -137,7 +137,7 @@ func collectLists(
 	}

 	for _, tuple := range lists {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}

@@ -148,7 +148,7 @@ func collectLists(
 			path.ListsCategory,
 			false)
 		if err != nil {
-			et.Add(clues.Wrap(err, "creating list collection path").WithClues(ctx))
+			el.AddRecoverable(clues.Wrap(err, "creating list collection path").WithClues(ctx))
 		}

 		collection := NewCollection(dir, serv, List, updater.UpdateStatus, ctrlOpts)

@@ -157,7 +157,7 @@ func collectLists(
 		spcs = append(spcs, collection)
 	}

-	return spcs, et.Err()
+	return spcs, el.Failure()
 }

 // collectLibraries constructs a onedrive Collections struct and Get()s

@@ -170,7 +170,7 @@ func collectLibraries(
 	scope selectors.SharePointScope,
 	updater statusUpdater,
 	ctrlOpts control.Options,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]data.BackupCollection, map[string]struct{}, error) {
 	logger.Ctx(ctx).Debug("creating SharePoint Library collections")

@@ -206,12 +206,12 @@ func collectPages(
 	siteID string,
 	updater statusUpdater,
 	ctrlOpts control.Options,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]data.BackupCollection, error) {
 	logger.Ctx(ctx).Debug("creating SharePoint Pages collections")

 	var (
-		et   = errs.Tracker()
+		el   = errs.Local()
 		spcs = make([]data.BackupCollection, 0)
 	)

@@ -230,7 +230,7 @@ func collectPages(
 	}

 	for _, tuple := range tuples {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}

@@ -241,7 +241,7 @@ func collectPages(
 			path.PagesCategory,
 			false)
 		if err != nil {
-			et.Add(clues.Wrap(err, "creating page collection path").WithClues(ctx))
+			el.AddRecoverable(clues.Wrap(err, "creating page collection path").WithClues(ctx))
 		}

 		collection := NewCollection(dir, serv, Pages, updater.UpdateStatus, ctrlOpts)

@@ -251,7 +251,7 @@ func collectPages(
 		spcs = append(spcs, collection)
 	}

-	return spcs, et.Err()
+	return spcs, el.Failure()
 }

 type folderMatcher struct {

@@ -92,12 +92,12 @@ func loadSiteLists(
 	gs graph.Servicer,
 	siteID string,
 	listIDs []string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]models.Listable, error) {
 	var (
 		results     = make([]models.Listable, 0)
 		semaphoreCh = make(chan struct{}, fetchChannelSize)
-		et          = errs.Tracker()
+		el          = errs.Local()
 		wg          sync.WaitGroup
 		m           sync.Mutex
 	)

@@ -112,7 +112,7 @@ func loadSiteLists(
 	}

 	for _, listID := range listIDs {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}

@@ -131,13 +131,13 @@ func loadSiteLists(

 			entry, err = gs.Client().SitesById(siteID).ListsById(id).Get(ctx, nil)
 			if err != nil {
-				et.Add(clues.Wrap(err, "getting site list").WithClues(ctx).With(graph.ErrData(err)...))
+				el.AddRecoverable(clues.Wrap(err, "getting site list").WithClues(ctx).With(graph.ErrData(err)...))
 				return
 			}

 			cols, cTypes, lItems, err := fetchListContents(ctx, gs, siteID, id, errs)
 			if err != nil {
-				et.Add(clues.Wrap(err, "getting list contents"))
+				el.AddRecoverable(clues.Wrap(err, "getting list contents"))
 				return
 			}

@@ -150,7 +150,7 @@ func loadSiteLists(

 	wg.Wait()

-	return results, et.Err()
+	return results, el.Failure()
 }

 // fetchListContents utility function to retrieve associated M365 relationships

@@ -160,7 +160,7 @@ func fetchListContents(
 	ctx context.Context,
 	service graph.Servicer,
 	siteID, listID string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (
 	[]models.ColumnDefinitionable,
 	[]models.ContentTypeable,

@@ -193,17 +193,17 @@ func fetchListItems(
 	ctx context.Context,
 	gs graph.Servicer,
 	siteID, listID string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]models.ListItemable, error) {
 	var (
 		prefix  = gs.Client().SitesById(siteID).ListsById(listID)
 		builder = prefix.Items()
 		itms    = make([]models.ListItemable, 0)
-		et      = errs.Tracker()
+		el      = errs.Local()
 	)

 	for {
-		if errs.Err() != nil {
+		if errs.Failure() != nil {
 			break
 		}

@@ -213,7 +213,7 @@ func fetchListItems(
 		}

 		for _, itm := range resp.GetValue() {
-			if et.Err() != nil {
+			if el.Failure() != nil {
 				break
 			}

@@ -221,7 +221,7 @@ func fetchListItems(

 			fields, err := newPrefix.Fields().Get(ctx, nil)
 			if err != nil {
-				et.Add(clues.Wrap(err, "getting list fields").WithClues(ctx).With(graph.ErrData(err)...))
+				el.AddRecoverable(clues.Wrap(err, "getting list fields").WithClues(ctx).With(graph.ErrData(err)...))
 				continue
 			}

@@ -237,7 +237,7 @@ func fetchListItems(
 		builder = mssite.NewItemListsItemItemsRequestBuilder(*resp.GetOdataNextLink(), gs.Adapter())
 	}

-	return itms, et.Err()
+	return itms, el.Failure()
 }

 // fetchColumns utility function to return columns from a site.

@@ -300,16 +300,16 @@ func fetchContentTypes(
 	ctx context.Context,
 	gs graph.Servicer,
 	siteID, listID string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]models.ContentTypeable, error) {
 	var (
-		et      = errs.Tracker()
+		el      = errs.Local()
 		cTypes  = make([]models.ContentTypeable, 0)
 		builder = gs.Client().SitesById(siteID).ListsById(listID).ContentTypes()
 	)

 	for {
-		if errs.Err() != nil {
+		if errs.Failure() != nil {
 			break
 		}

@@ -319,7 +319,7 @@ func fetchContentTypes(
 		}

 		for _, cont := range resp.GetValue() {
-			if et.Err() != nil {
+			if el.Failure() != nil {
 				break
 			}

@@ -327,7 +327,7 @@ func fetchContentTypes(

 			links, err := fetchColumnLinks(ctx, gs, siteID, listID, id)
 			if err != nil {
-				et.Add(err)
+				el.AddRecoverable(err)
 				continue
 			}

@@ -335,7 +335,7 @@ func fetchContentTypes(

 			cs, err := fetchColumns(ctx, gs, siteID, listID, id)
 			if err != nil {
-				et.Add(err)
+				el.AddRecoverable(err)
 				continue
 			}

@@ -351,7 +351,7 @@ func fetchContentTypes(
 		builder = mssite.NewItemListsItemContentTypesRequestBuilder(*resp.GetOdataNextLink(), gs.Adapter())
 	}

-	return cTypes, et.Err()
+	return cTypes, el.Failure()
 }

 func fetchColumnLinks(
@@ -46,7 +46,7 @@ func RestoreCollections(
 	dest control.RestoreDestination,
 	dcs []data.RestoreCollection,
 	deets *details.Builder,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*support.ConnectorOperationStatus, error) {
 	var (
 		err error

@@ -215,7 +215,7 @@ func RestoreListCollection(
 	dc data.RestoreCollection,
 	restoreContainerName string,
 	deets *details.Builder,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (support.CollectionMetrics, error) {
 	ctx, end := D.Span(ctx, "gc:sharepoint:restoreListCollection", D.Label("path", dc.FullPath()))
 	defer end()

@@ -225,13 +225,13 @@ func RestoreListCollection(
 		directory = dc.FullPath()
 		siteID    = directory.ResourceOwner()
 		items     = dc.Items(ctx, errs)
-		et        = errs.Tracker()
+		el        = errs.Local()
 	)

 	trace.Log(ctx, "gc:sharepoint:restoreListCollection", directory.String())

 	for {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}

@@ -252,7 +252,7 @@ func RestoreListCollection(
 				siteID,
 				restoreContainerName)
 			if err != nil {
-				et.Add(err)
+				el.AddRecoverable(err)
 				continue
 			}

@@ -260,7 +260,7 @@ func RestoreListCollection(

 			itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
 			if err != nil {
-				et.Add(clues.Wrap(err, "appending item to full path").WithClues(ctx))
+				el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ctx))
 				continue
 			}

@@ -276,7 +276,7 @@ func RestoreListCollection(
 		}
 	}

-	return metrics, et.Err()
+	return metrics, el.Failure()
 }

 // RestorePageCollection handles restoration of an individual site page collection.

@@ -289,7 +289,7 @@ func RestorePageCollection(
 	dc data.RestoreCollection,
 	restoreContainerName string,
 	deets *details.Builder,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (support.CollectionMetrics, error) {
 	var (
 		metrics = support.CollectionMetrics{}

@@ -308,13 +308,13 @@ func RestorePageCollection(
 	}

 	var (
-		et      = errs.Tracker()
+		el      = errs.Local()
 		service = discover.NewBetaService(adpt)
 		items   = dc.Items(ctx, errs)
 	)

 	for {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}

@@ -335,7 +335,7 @@ func RestorePageCollection(
 				siteID,
 				restoreContainerName)
 			if err != nil {
-				et.Add(err)
+				el.AddRecoverable(err)
 				continue
 			}

@@ -343,7 +343,7 @@ func RestorePageCollection(

 			itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
 			if err != nil {
-				et.Add(clues.Wrap(err, "appending item to full path").WithClues(ctx))
+				el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ctx))
 				continue
 			}

@@ -359,5 +359,5 @@ func RestorePageCollection(
 		}
 	}

-	return metrics, et.Err()
+	return metrics, el.Failure()
 }
@@ -33,7 +33,7 @@ type Collection interface {
 	// Each returned struct contains the next item in the collection
 	// The channel is closed when there are no more items in the collection or if
 	// an unrecoverable error caused an early termination in the sender.
-	Items(ctx context.Context, errs *fault.Errors) <-chan Stream
+	Items(ctx context.Context, errs *fault.Bus) <-chan Stream
 	// FullPath returns a path struct that acts as a metadata tag for this
 	// Collection.
 	FullPath() path.Path

@@ -26,7 +26,7 @@ type kopiaDataCollection struct {

 func (kdc *kopiaDataCollection) Items(
 	ctx context.Context,
-	_ *fault.Errors, // unused, just matching the interface
+	_ *fault.Bus, // unused, just matching the interface
 ) <-chan data.Stream {
 	res := make(chan data.Stream)

@@ -139,7 +139,7 @@ type corsoProgress struct {
 	toMerge    map[string]PrevRefs
 	mu         sync.RWMutex
 	totalBytes int64
-	errs       *fault.Errors
+	errs       *fault.Bus
 }

 // Kopia interface function used as a callback when kopia finishes processing a

@@ -169,7 +169,7 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
 	// never had to materialize their details in-memory.
 	if d.info == nil {
 		if d.prevPath == nil {
-			cp.errs.Add(clues.New("item sourced from previous backup with no previous path").
+			cp.errs.AddRecoverable(clues.New("item sourced from previous backup with no previous path").
 				With(
 					"service", d.repoPath.Service().String(),
 					"category", d.repoPath.Category().String(),

@@ -263,7 +263,7 @@ func (cp *corsoProgress) CachedFile(fname string, size int64) {
 func (cp *corsoProgress) Error(relpath string, err error, isIgnored bool) {
 	defer cp.UploadProgress.Error(relpath, err, isIgnored)

-	cp.errs.Add(clues.Wrap(err, "kopia reported error").
+	cp.errs.AddRecoverable(clues.Wrap(err, "kopia reported error").
 		With("is_ignored", isIgnored, "relative_path", relpath).
 		Label(fault.LabelForceNoBackupCreation))
 }

@@ -335,7 +335,7 @@ func collectionEntries(
 			itemPath, err := streamedEnts.FullPath().Append(e.UUID(), true)
 			if err != nil {
 				err = errors.Wrap(err, "getting full item path")
-				progress.errs.Add(err)
+				progress.errs.AddRecoverable(err)

 				logger.Ctx(ctx).With("err", err).Errorw("getting full item path", clues.InErr(err).Slice()...)

@@ -518,7 +518,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() {

 	assert.Empty(t, cp.pending)
 	assert.Empty(t, bd.Details().Entries)
-	assert.Error(t, cp.errs.Err())
+	assert.Error(t, cp.errs.Failure())
 }

 func (suite *CorsoProgressUnitSuite) TestFinishedFileBuildsHierarchyNewItem() {
@@ -134,7 +134,7 @@ func (w Wrapper) BackupCollections(
 	globalExcludeSet map[string]struct{},
 	tags map[string]string,
 	buildTreeWithBase bool,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*BackupStats, *details.Builder, map[string]PrevRefs, error) {
 	if w.c == nil {
 		return nil, nil, nil, clues.Stack(errNotConnected).WithClues(ctx)

@@ -168,8 +168,7 @@ func (w Wrapper) BackupCollections(
 		base,
 		collections,
 		globalExcludeSet,
-		progress,
-	)
+		progress)
 	if err != nil {
 		return nil, nil, nil, errors.Wrap(err, "building kopia directories")
 	}

@@ -184,7 +183,7 @@ func (w Wrapper) BackupCollections(
 		return nil, nil, nil, err
 	}

-	return s, progress.deets, progress.toMerge, progress.errs.Err()
+	return s, progress.deets, progress.toMerge, progress.errs.Failure()
 }

 func (w Wrapper) makeSnapshotWithRoot(

@@ -383,7 +382,7 @@ func (w Wrapper) RestoreMultipleItems(
 	snapshotID string,
 	paths []path.Path,
 	bcounter ByteCounter,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]data.RestoreCollection, error) {
 	ctx, end := D.Span(ctx, "kopia:restoreMultipleItems")
 	defer end()

@@ -400,23 +399,23 @@ func (w Wrapper) RestoreMultipleItems(
 	var (
 		// Maps short ID of parent path to data collection for that folder.
 		cols = map[string]*kopiaDataCollection{}
-		et   = errs.Tracker()
+		el   = errs.Local()
 	)

 	for _, itemPath := range paths {
-		if et.Err() != nil {
-			return nil, et.Err()
+		if el.Failure() != nil {
+			return nil, el.Failure()
 		}

 		ds, err := getItemStream(ctx, itemPath, snapshotRoot, bcounter)
 		if err != nil {
-			et.Add(clues.Stack(err).Label(fault.LabelForceNoBackupCreation))
+			el.AddRecoverable(clues.Stack(err).Label(fault.LabelForceNoBackupCreation))
			continue
 		}

 		parentPath, err := itemPath.Dir()
 		if err != nil {
-			et.Add(clues.Wrap(err, "making directory collection").
+			el.AddRecoverable(clues.Wrap(err, "making directory collection").
 				WithClues(ctx).
 				Label(fault.LabelForceNoBackupCreation))

@@ -443,7 +442,7 @@ func (w Wrapper) RestoreMultipleItems(
 		res = append(res, c)
 	}

-	return res, et.Err()
+	return res, el.Failure()
 }

 // DeleteSnapshot removes the provided manifest from kopia.

@@ -394,7 +394,7 @@ type mockBackupCollection struct {
 	streams []data.Stream
 }

-func (c *mockBackupCollection) Items(context.Context, *fault.Errors) <-chan data.Stream {
+func (c *mockBackupCollection) Items(context.Context, *fault.Bus) <-chan data.Stream {
 	res := make(chan data.Stream)

 	go func() {
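The operations-layer hunks below read results off the bus itself rather than a local view: `Fail()` records the unrecoverable failure, `Failure()` (formerly `Err()`) retrieves it, and `Recovered()` (formerly `Errs()`) lists the recoverable errors accumulated during the run. A rough sketch of that consumer-side flow, with `runBackup` standing in as a hypothetical placeholder for the real work:

```go
func run(ctx context.Context, errs *fault.Bus) error {
	if err := runBackup(ctx, errs); err != nil {
		// a hard, run-ending error: record it as the bus failure
		errs.Fail(errors.Wrap(err, "doing backup"))
	}

	// recoverable errors were accumulated during the run; surface them in logs
	for _, err := range errs.Recovered() {
		logger.Ctx(ctx).With("error", err).Errorw("recoverable error during backup")
	}

	// nil when the run had no unrecoverable failure
	return errs.Failure()
}
```

This mirrors the shape of `BackupOperation.Run` in the hunks that follow, where the recoverable slice is also scanned for the `fault.LabelForceNoBackupCreation` label to decide whether to persist the backup model.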
|
|||||||
@@ -100,7 +100,7 @@ type backupStats struct {
 }

 type detailsWriter interface {
-	WriteBackupDetails(context.Context, *details.Details, *fault.Errors) (string, error)
+	WriteBackupDetails(context.Context, *details.Details, *fault.Bus) (string, error)
 }

 // ---------------------------------------------------------------------------

@@ -166,12 +166,12 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 			With("err", err).
 			Errorw("doing backup", clues.InErr(err).Slice()...)
 		op.Errors.Fail(errors.Wrap(err, "doing backup"))
-		opStats.readErr = op.Errors.Err()
+		opStats.readErr = op.Errors.Failure()
 	}

 	// TODO: the consumer (sdk or cli) should run this, not operations.
-	recoverableCount := len(op.Errors.Errs())
-	for i, err := range op.Errors.Errs() {
+	recoverableCount := len(op.Errors.Recovered())
+	for i, err := range op.Errors.Recovered() {
 		logger.Ctx(ctx).
 			With("error", err).
 			With(clues.InErr(err).Slice()...).

@@ -185,14 +185,14 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 	err = op.persistResults(startTime, &opStats)
 	if err != nil {
 		op.Errors.Fail(errors.Wrap(err, "persisting backup results"))
-		opStats.writeErr = op.Errors.Err()
+		opStats.writeErr = op.Errors.Failure()

-		return op.Errors.Err()
+		return op.Errors.Failure()
 	}

 	// force exit without backup in certain cases.
 	// see: https://github.com/alcionai/corso/pull/2510#discussion_r1113532530
-	for _, e := range op.Errors.Errs() {
+	for _, e := range op.Errors.Recovered() {
 		if clues.HasLabel(e, fault.LabelForceNoBackupCreation) {
 			logger.Ctx(ctx).
 				With("error", e).

@@ -200,7 +200,7 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 				Infow("completed backup; conditional error forcing exit without model persistence",
 					"results", op.Results)

-			return op.Errors.Fail(errors.Wrap(e, "forced backup")).Err()
+			return op.Errors.Fail(errors.Wrap(e, "forced backup")).Failure()
 		}
 	}

@@ -212,9 +212,9 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 		deets.Details())
 	if err != nil {
 		op.Errors.Fail(errors.Wrap(err, "persisting backup"))
-		opStats.writeErr = op.Errors.Err()
+		opStats.writeErr = op.Errors.Failure()

-		return op.Errors.Err()
+		return op.Errors.Failure()
 	}

 	logger.Ctx(ctx).Infow("completed backup", "results", op.Results)
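For reference, this is roughly what the renamed read-out looks like from a consumer's side once `Run` completes. `runOperation` below is a hypothetical stand-in for an operation and the import path is assumed; `Failure()` and `Recovered()` are the real accessors introduced here.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/alcionai/corso/src/pkg/fault" // assumed import path
)

// runOperation is an illustrative stand-in for an operation's Run();
// real operations populate op.Errors the same way.
func runOperation(errs *fault.Bus) *fault.Bus {
	errs.AddRecoverable(errors.New("one item skipped"))
	return errs
}

func main() {
	errs := runOperation(fault.New(false))

	// a nil Failure() means the operation ran to completion...
	if err := errs.Failure(); err != nil {
		fmt.Println("operation failed:", err)
		return
	}

	// ...but completion is not the same as error-free: recoverable
	// errors may still have accumulated along the way.
	for _, err := range errs.Recovered() {
		fmt.Println("recoverable:", err)
	}
}
```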
@@ -322,7 +322,7 @@ func produceBackupDataCollections(
 	sel selectors.Selector,
 	metadata []data.RestoreCollection,
 	ctrlOpts control.Options,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]data.BackupCollection, map[string]struct{}, error) {
 	complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Discovering items to backup"))
 	defer func() {

@@ -346,7 +346,7 @@ type backuper interface {
 		excluded map[string]struct{},
 		tags map[string]string,
 		buildTreeWithBase bool,
-		errs *fault.Errors,
+		errs *fault.Bus,
 	) (*kopia.BackupStats, *details.Builder, map[string]kopia.PrevRefs, error)
 }

@@ -405,7 +405,7 @@ func consumeBackupDataCollections(
 	excludes map[string]struct{},
 	backupID model.StableID,
 	isIncremental bool,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*kopia.BackupStats, *details.Builder, map[string]kopia.PrevRefs, error) {
 	complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Backing up data"))
 	defer func() {

@@ -514,7 +514,7 @@ func mergeDetails(
 	mans []*kopia.ManifestEntry,
 	shortRefsFromPrevBackup map[string]kopia.PrevRefs,
 	deets *details.Builder,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) error {
 	// Don't bother loading any of the base details if there's nothing we need to
 	// merge.
@@ -154,8 +154,8 @@ func runAndCheckBackup(
 	assert.Less(t, int64(0), bo.Results.BytesRead, "bytes read")
 	assert.Less(t, int64(0), bo.Results.BytesUploaded, "bytes uploaded")
 	assert.Equal(t, 1, bo.Results.ResourceOwners, "count of resource owners")
-	assert.NoError(t, bo.Errors.Err(), "incremental non-recoverable error")
-	assert.Empty(t, bo.Errors.Errs(), "incremental recoverable/iteration errors")
+	assert.NoError(t, bo.Errors.Failure(), "incremental non-recoverable error")
+	assert.Empty(t, bo.Errors.Recovered(), "incremental recoverable/iteration errors")
 	assert.NoError(t, bo.Results.ReadErrors, "errors reading data")
 	assert.NoError(t, bo.Results.WriteErrors, "errors writing data")
 	assert.Equal(t, 1, mb.TimesCalled[events.BackupStart], "backup-start events")

@@ -626,8 +626,8 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() {
 	assert.Greater(t, bo.Results.BytesRead, incBO.Results.BytesRead, "incremental bytes read")
 	assert.Greater(t, bo.Results.BytesUploaded, incBO.Results.BytesUploaded, "incremental bytes uploaded")
 	assert.Equal(t, bo.Results.ResourceOwners, incBO.Results.ResourceOwners, "incremental backup resource owner")
-	assert.NoError(t, incBO.Errors.Err(), "incremental non-recoverable error")
-	assert.Empty(t, incBO.Errors.Errs(), "count incremental recoverable/iteration errors")
+	assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error")
+	assert.Empty(t, incBO.Errors.Recovered(), "count incremental recoverable/iteration errors")
 	assert.NoError(t, incBO.Results.ReadErrors, "incremental read errors")
 	assert.NoError(t, incBO.Results.WriteErrors, "incremental write errors")
 	assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")

@@ -1057,8 +1057,8 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
 	// +4 on read/writes to account for metadata: 1 delta and 1 path for each type.
 	assert.Equal(t, test.itemsWritten+4, incBO.Results.ItemsWritten, "incremental items written")
 	assert.Equal(t, test.itemsRead+4, incBO.Results.ItemsRead, "incremental items read")
-	assert.NoError(t, incBO.Errors.Err(), "incremental non-recoverable error")
-	assert.Empty(t, incBO.Errors.Errs(), "incremental recoverable/iteration errors")
+	assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error")
+	assert.Empty(t, incBO.Errors.Recovered(), "incremental recoverable/iteration errors")
 	assert.NoError(t, incBO.Results.ReadErrors, "incremental read errors")
 	assert.NoError(t, incBO.Results.WriteErrors, "incremental write errors")
 	assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
@@ -63,7 +63,7 @@ func (mr *mockRestorer) RestoreMultipleItems(
 	snapshotID string,
 	paths []path.Path,
 	bc kopia.ByteCounter,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]data.RestoreCollection, error) {
 	mr.gotPaths = append(mr.gotPaths, paths...)

@@ -99,7 +99,7 @@ func (mbu mockBackuper) BackupCollections(
 	excluded map[string]struct{},
 	tags map[string]string,
 	buildTreeWithBase bool,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*kopia.BackupStats, *details.Builder, map[string]kopia.PrevRefs, error) {
 	if mbu.checkFunc != nil {
 		mbu.checkFunc(bases, cs, tags, buildTreeWithBase)

@@ -117,7 +117,7 @@ type mockDetailsReader struct {
 func (mdr mockDetailsReader) ReadBackupDetails(
 	ctx context.Context,
 	detailsID string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*details.Details, error) {
 	r := mdr.entries[detailsID]
@@ -13,7 +13,7 @@ import (
 )

 type detailsReader interface {
-	ReadBackupDetails(ctx context.Context, detailsID string, errs *fault.Errors) (*details.Details, error)
+	ReadBackupDetails(ctx context.Context, detailsID string, errs *fault.Bus) (*details.Details, error)
 }

 func getBackupAndDetailsFromID(

@@ -21,7 +21,7 @@ func getBackupAndDetailsFromID(
 	backupID model.StableID,
 	ms *store.Wrapper,
 	detailsStore detailsReader,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*backup.Backup, *details.Details, error) {
 	dID, bup, err := ms.GetDetailsIDFromBackupID(ctx, backupID)
 	if err != nil {
@@ -45,7 +45,7 @@ func produceManifestsAndMetadata(
 	reasons []kopia.Reason,
 	tenantID string,
 	getMetadata bool,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]*kopia.ManifestEntry, []data.RestoreCollection, bool, error) {
 	var (
 		metadataFiles = graph.AllMetadataFileNames()

@@ -70,7 +70,7 @@ func produceManifestsAndMetadata(
 	//
 	// TODO(ashmrtn): This may need updating if we start sourcing item backup
 	// details from previous snapshots when using kopia-assisted incrementals.
-	if err := verifyDistinctBases(ctx, ms, errs); err != nil {
+	if err := verifyDistinctBases(ctx, ms); err != nil {
 		logger.Ctx(ctx).With("error", err).Infow(
 			"base snapshot collision, falling back to full backup",
 			clues.In(ctx).Slice()...)

@@ -135,18 +135,10 @@ func produceManifestsAndMetadata(
 // of manifests, that each manifest's Reason (owner, service, category) is only
 // included once. If a reason is duplicated by any two manifests, an error is
 // returned.
-func verifyDistinctBases(ctx context.Context, mans []*kopia.ManifestEntry, errs *fault.Errors) error {
-	var (
-		failed  bool
-		reasons = map[string]manifest.ID{}
-		et      = errs.Tracker()
-	)
+func verifyDistinctBases(ctx context.Context, mans []*kopia.ManifestEntry) error {
+	reasons := map[string]manifest.ID{}

 	for _, man := range mans {
-		if et.Err() != nil {
-			break
-		}
-
 		// Incomplete snapshots are used only for kopia-assisted incrementals. The
 		// fact that we need this check here makes it seem like this should live in
 		// the kopia code. However, keeping it here allows for better debugging as

@@ -161,24 +153,16 @@ func verifyDistinctBases(ctx context.Context, mans []*kopia.ManifestEntry, errs
 			reasonKey := reason.ResourceOwner + reason.Service.String() + reason.Category.String()

 			if b, ok := reasons[reasonKey]; ok {
-				failed = true
-
-				et.Add(clues.New("manifests have overlapping reasons").
+				return clues.New("manifests have overlapping reasons").
 					WithClues(ctx).
-					With("other_manifest_id", b))
-
-				continue
+					With("other_manifest_id", b)
 			}

 			reasons[reasonKey] = man.ID
 		}
 	}

-	if failed {
-		return clues.New("multiple base snapshots qualify").WithClues(ctx)
-	}
-
-	return et.Err()
+	return nil
 }

 // collectMetadata retrieves all metadata files associated with the manifest.

@@ -188,7 +172,7 @@ func collectMetadata(
 	man *kopia.ManifestEntry,
 	fileNames []string,
 	tenantID string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]data.RestoreCollection, error) {
 	paths := []path.Path{}
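Note that `verifyDistinctBases` no longer touches the bus at all: an overlap is now a plain error, and the caller decides how severe it is. Below is a standalone sketch of that caller-side pattern; `checkBases` and the string-keyed input are illustrative stand-ins, not the repo's types.

```go
package main

import "fmt"

// checkBases is an illustrative stand-in for verifyDistinctBases: it
// returns a plain error on the first duplicate instead of aggregating
// through the fault bus.
func checkBases(reasons []string) error {
	seen := map[string]struct{}{}

	for _, r := range reasons {
		if _, ok := seen[r]; ok {
			return fmt.Errorf("manifests have overlapping reasons: %s", r)
		}

		seen[r] = struct{}{}
	}

	return nil
}

func main() {
	// the caller decides severity: a collision isn't fatal, it just
	// disqualifies the incremental path.
	if err := checkBases([]string{"a", "b", "a"}); err != nil {
		fmt.Println("base snapshot collision, falling back to full backup:", err)
	}
}
```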
@@ -53,7 +53,7 @@ type mockColl struct {
 	p path.Path
 }

-func (mc mockColl) Items(context.Context, *fault.Errors) <-chan data.Stream {
+func (mc mockColl) Items(context.Context, *fault.Bus) <-chan data.Stream {
 	return nil
 }

@@ -393,7 +393,7 @@ func (suite *OperationsManifestsUnitSuite) TestVerifyDistinctBases() {
 			ctx, flush := tester.NewContext()
 			defer flush()

-			err := verifyDistinctBases(ctx, test.mans, fault.New(true))
+			err := verifyDistinctBases(ctx, test.mans)
 			test.expect(suite.T(), err)
 		})
 	}

@@ -837,7 +837,7 @@ func (suite *BackupManifestSuite) TestBackupOperation_VerifyDistinctBases() {
 			ctx, flush := tester.NewContext()
 			defer flush()

-			test.errCheck(suite.T(), verifyDistinctBases(ctx, test.input, fault.New(true)))
+			test.errCheck(suite.T(), verifyDistinctBases(ctx, test.input))
 		})
 	}
 }
@@ -55,7 +55,7 @@ const (
 type operation struct {
 	CreatedAt time.Time `json:"createdAt"`

-	Errors  *fault.Errors   `json:"errors"`
+	Errors  *fault.Bus      `json:"errors"`
 	Options control.Options `json:"options"`
 	Status  opStatus        `json:"status"`

@@ -100,7 +100,7 @@ func connectToM365(
 	ctx context.Context,
 	sel selectors.Selector,
 	acct account.Account,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*connector.GraphConnector, error) {
 	complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Connecting to M365"))
 	defer func() {
@@ -104,7 +104,7 @@ type restorer interface {
 		snapshotID string,
 		paths []path.Path,
 		bc kopia.ByteCounter,
-		errs *fault.Errors,
+		errs *fault.Bus,
 	) ([]data.RestoreCollection, error)
 }

@@ -153,12 +153,12 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
 			With("err", err).
 			Errorw("doing restore", clues.InErr(err).Slice()...)
 		op.Errors.Fail(errors.Wrap(err, "doing restore"))
-		opStats.readErr = op.Errors.Err()
+		opStats.readErr = op.Errors.Failure()
 	}

 	// TODO: the consumer (sdk or cli) should run this, not operations.
-	recoverableCount := len(op.Errors.Errs())
-	for i, err := range op.Errors.Errs() {
+	recoverableCount := len(op.Errors.Recovered())
+	for i, err := range op.Errors.Recovered() {
 		logger.Ctx(ctx).
 			With("error", err).
 			With(clues.InErr(err).Slice()...).

@@ -172,9 +172,9 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
 	err = op.persistResults(ctx, start, &opStats)
 	if err != nil {
 		op.Errors.Fail(errors.Wrap(err, "persisting restore results"))
-		opStats.writeErr = op.Errors.Err()
+		opStats.writeErr = op.Errors.Failure()

-		return nil, op.Errors.Err()
+		return nil, op.Errors.Failure()
 	}

 	logger.Ctx(ctx).Infow("completed restore", "results", op.Results)

@@ -340,7 +340,7 @@ func formatDetailsForRestoration(
 	ctx context.Context,
 	sel selectors.Selector,
 	deets *details.Details,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]path.Path, error) {
 	fds, err := sel.Reduce(ctx, deets, errs)
 	if err != nil {

@@ -351,17 +351,17 @@ func formatDetailsForRestoration(
 		fdsPaths  = fds.Paths()
 		paths     = make([]path.Path, len(fdsPaths))
 		shortRefs = make([]string, len(fdsPaths))
-		et        = errs.Tracker()
+		el        = errs.Local()
 	)

 	for i := range fdsPaths {
-		if et.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}

 		p, err := path.FromDataLayerPath(fdsPaths[i], true)
 		if err != nil {
-			et.Add(clues.
+			el.AddRecoverable(clues.
 				Wrap(err, "parsing details path after reduction").
 				WithMap(clues.In(ctx)).
 				With("path", fdsPaths[i]))

@@ -386,5 +386,5 @@ func formatDetailsForRestoration(

 	logger.Ctx(ctx).With("short_refs", shortRefs).Infof("found %d details entries to restore", len(shortRefs))

-	return paths, et.Err()
+	return paths, el.Failure()
 }
@@ -290,7 +290,6 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run() {
 	ds, err := ro.Run(ctx)

 	require.NoError(t, err, "restoreOp.Run()")
-	require.Empty(t, ro.Errors.Errs(), "restoreOp.Run() recoverable errors")
 	require.NotEmpty(t, ro.Results, "restoreOp results")
 	require.NotNil(t, ds, "restored details")
 	assert.Equal(t, ro.Status, Completed, "restoreOp status")

@@ -299,8 +298,8 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run() {
 	assert.Less(t, 0, ro.Results.ItemsWritten, "restored items written")
 	assert.Less(t, int64(0), ro.Results.BytesRead, "bytes read")
 	assert.Equal(t, 1, ro.Results.ResourceOwners, "resource Owners")
-	assert.NoError(t, ro.Errors.Err(), "non-recoverable error")
-	assert.Empty(t, ro.Errors.Errs(), "recoverable errors")
+	assert.NoError(t, ro.Errors.Failure(), "non-recoverable error")
+	assert.Empty(t, ro.Errors.Recovered(), "recoverable errors")
 	assert.NoError(t, ro.Results.ReadErrors, "errors while reading restore data")
 	assert.NoError(t, ro.Results.WriteErrors, "errors while writing restore data")
 	assert.Equal(t, suite.numItems, ro.Results.ItemsWritten, "backup and restore wrote the same num of items")
@@ -47,7 +47,7 @@ const (
 func (ss *streamStore) WriteBackupDetails(
 	ctx context.Context,
 	backupDetails *details.Details,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (string, error) {
 	// construct the path of the container for the `details` item
 	p, err := path.Builder{}.

@@ -95,7 +95,7 @@ func (ss *streamStore) WriteBackupDetails(
 func (ss *streamStore) ReadBackupDetails(
 	ctx context.Context,
 	detailsID string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*details.Details, error) {
 	// construct the path for the `details` item
 	detailsPath, err := path.Builder{}.

@@ -195,7 +195,7 @@ func (dc *streamCollection) DoNotMergeItems() bool {

 // Items() always returns a channel with a single data.Stream
 // representing the object to be persisted
-func (dc *streamCollection) Items(context.Context, *fault.Errors) <-chan data.Stream {
+func (dc *streamCollection) Items(context.Context, *fault.Bus) <-chan data.Stream {
 	items := make(chan data.Stream, 1)
 	defer close(items)
 	items <- dc.item
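The `Items(ctx, *fault.Bus)` signature recurs across every collection touched by this change. Here is a self-contained sketch of a producer with that shape; the `Stream` interface below is a trimmed, hypothetical stand-in for the repo's `data.Stream`, and the `fault` import path is assumed.

```go
package main

import (
	"context"
	"fmt"

	"github.com/alcionai/corso/src/pkg/fault" // assumed import path
)

// Stream is a trimmed, local stand-in for the repo's data.Stream
// interface, just to keep this sketch self-contained.
type Stream interface{ ID() string }

type item string

func (i item) ID() string { return string(i) }

// simpleCollection mirrors the Items(ctx, *fault.Bus) shape this change
// threads through the real collections: the bus rides along so the
// producer can report recoverable per-item failures without closing
// the stream.
type simpleCollection struct{ items []Stream }

func (c simpleCollection) Items(ctx context.Context, errs *fault.Bus) <-chan Stream {
	out := make(chan Stream, len(c.items))
	defer close(out)

	for _, s := range c.items {
		out <- s
	}

	return out
}

func main() {
	c := simpleCollection{items: []Stream{item("a"), item("b")}}

	for s := range c.Items(context.Background(), fault.New(false)) {
		fmt.Println(s.ID())
	}
}
```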
@@ -37,8 +37,8 @@ type Backup struct {
 	Version int `json:"version"`

 	// Errors contains all errors aggregated during a backup operation.
-	Errors     fault.ErrorsData `json:"errors"`
+	Errors     fault.Errors `json:"errors"`
 	ErrorCount int `json:"errorCount"`

 	// stats are embedded so that the values appear as top-level properties
 	stats.Errs // Deprecated, replaced with Errors.

@@ -55,12 +55,12 @@ func New(
 	selector selectors.Selector,
 	rw stats.ReadWrites,
 	se stats.StartAndEndTime,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) *Backup {
-	errData := errs.Data()
+	errData := errs.Errors()

-	errCount := len(errs.Data().Errs)
-	if errData.Err != nil {
+	errCount := len(errs.Errors().Recovered)
+	if errData.Failure != nil {
 		errCount++
 	}

@@ -169,12 +169,12 @@ func (b Backup) countErrors() int {
 	}

 	// future tracking
-	if b.Errors.Err != nil || len(b.Errors.Errs) > 0 {
-		if b.Errors.Err != nil {
+	if b.Errors.Failure != nil || len(b.Errors.Recovered) > 0 {
+		if b.Errors.Failure != nil {
 			errCount++
 		}

-		errCount += len(b.Errors.Errs)
+		errCount += len(b.Errors.Recovered)
 	}

 	return errCount
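The hunks above lean on the new `Bus`/`Errors` split: the bus carries sync controls at runtime, while `Errors()` snapshots plain data for persistence on the `Backup` model. A small sketch of that flow; `countAll` is an illustrative helper mirroring `countErrors`, and the import path is assumed.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/alcionai/corso/src/pkg/fault" // assumed import path
)

// countAll is a hypothetical helper shaped like Backup.countErrors:
// it works on the plain data snapshot, not on the live bus.
func countAll(errData fault.Errors) int {
	count := len(errData.Recovered)

	// the failure, if set, also counts toward the total
	if errData.Failure != nil {
		count++
	}

	return count
}

func main() {
	errs := fault.New(false)
	errs.Fail(errors.New("hard stop"))
	errs.AddRecoverable(errors.New("one bad item"))

	// snapshot the runtime bus into persistable data
	errData := errs.Errors()

	fmt.Println("total errors:", countAll(errData))
}
```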
@@ -41,8 +41,8 @@ func stubBackup(t time.Time) backup.Backup {
 		DetailsID: "details",
 		Status:    "status",
 		Selector:  sel.Selector,
-		Errors: fault.ErrorsData{
-			Errs: []error{errors.New("read"), errors.New("write")},
+		Errors: fault.Errors{
+			Recovered: []error{errors.New("read"), errors.New("write")},
 		},
 		Errs: stats.Errs{
 			ReadErrors: errors.New("1"),
@@ -21,18 +21,18 @@ type mockController struct {
 	errors any
 }

 func connectClient() error       { return nil }
 func dependencyCall() error      { return nil }
 func getIthItem(i int) error     { return nil }
 func getData() ([]string, error) { return nil, nil }
-func storeData([]string, *fault.Errors) {}
+func storeData([]string, *fault.Bus) {}

 type mockOper struct {
-	Errors *fault.Errors
+	Errors *fault.Bus
 }

 func newOperation() mockOper { return mockOper{fault.New(true)} }
-func (m mockOper) Run() *fault.Errors { return m.Errors }
+func (m mockOper) Run() *fault.Bus { return m.Errors }

 type mockDepenedency struct{}

@@ -47,44 +47,40 @@ var dependency = mockDepenedency{}
 // ---------------------------------------------------------------------------

 // ExampleNew highlights assumptions and best practices
-// for generating fault.Errors structs.
+// for generating fault.Bus structs.
 func ExampleNew() {
-	// fault.Errors should only be generated during the construction of
-	// another controller, such as a new Backup or Restore Operations.
-	// Configurations like failFast are set during construction.
-	//
-	// Generating new fault.Errors structs outside of an operation
-	// controller is a smell, and should be avoided. If you need
-	// to aggregate errors, you should accept an interface and pass
-	// an fault.Errors instance into it.
+	// New fault.Bus instances should only get generated during initialization.
+	// Such as when starting up a new Backup or Restore Operation.
+	// Configuration (eg: failFast) is set during construction and cannot
+	// be updated.
	ctrl = mockController{
		errors: fault.New(false),
	}
 }

-// ExampleErrors_Fail describes the assumptions and best practices
+// ExampleBus_Fail describes the assumptions and best practices
 // for setting the Failure error.
-func ExampleErrors_Fail() {
+func ExampleBus_Fail() {
 	errs := fault.New(false)

 	// Fail() is used to record non-recoverable errors.
 	//
 	// Fail() should only get called in the last step before returning
-	// a fault.Errors from a controller. In all other cases, you
-	// should simply return an error and expect the upstream controller
-	// to call Fail() for you.
-	topLevelHandler := func(errs *fault.Errors) *fault.Errors {
+	// a fault.Bus from a controller. In all other cases, you
+	// can stick to standard golang error handling and expect some upstream
+	// controller to call Fail() for you (if necessary).
+	topLevelHandler := func(errs *fault.Bus) *fault.Bus {
 		if err := connectClient(); err != nil {
 			return errs.Fail(err)
 		}

 		return errs
 	}
-	if errs := topLevelHandler(errs); errs.Err() != nil {
-		fmt.Println(errs.Err())
+	if errs := topLevelHandler(errs); errs.Failure() != nil {
+		fmt.Println(errs.Failure())
 	}

-	// Only the topmost func in the stack should set the Fail() err.
+	// Only the top-most func in the stack should set the failure.
 	// IE: Fail() is not Wrap(). In lower levels, errors should get
 	// wrapped and returned like normal, and only handled by fault
 	// at the end.
@@ -102,22 +98,30 @@ func ExampleErrors_Fail() {
 	}
 }

-// ExampleErrors_Add describes the assumptions and best practices
+// ExampleBus_AddRecoverable describes the assumptions and best practices
 // for aggregating iterable or recoverable errors.
-func ExampleErrors_Add() {
+func ExampleBus_AddRecoverable() {
 	errs := fault.New(false)

-	// Add() is used to record any recoverable error.
+	// AddRecoverable() is used to record any recoverable error.
 	//
-	// Add() should only get called as the last error handling step
-	// within a loop or stream. In all other cases, you can return
-	// an error like normal and expect the upstream point of iteration
-	// to call Add() for you.
+	// What counts as a recoverable error? That's up to the given
+	// implementation. Normally, it's an inability to process one
+	// of many items within an iteration (ex: couldn't download 1 of
+	// 1000 emails). But just because an error occurred during a loop
+	// doesn't mean it's recoverable, ex: a failure to retrieve the next
+	// page when accumulating a batch of resources isn't usually
+	// recoverable. The choice is always up to the function at hand.
+	//
+	// AddRecoverable() should only get called as the top-most location
+	// of error handling within the recoverable process. Child functions
+	// should stick to normal golang error handling and expect the upstream
+	// controller to call AddRecoverable() for you.
 	for i := range items {
 		clientBasedGetter := func(i int) error {
 			if err := getIthItem(i); err != nil {
-				// lower level calls don't Add to the fault.Errors.
-				// they handl errors like normal.
+				// lower level calls don't AddRecoverable to the fault.Bus.
+				// they stick to normal golang error handling.
 				return errors.Wrap(err, "dependency")
 			}

@@ -126,154 +130,158 @@ func ExampleErrors_Add() {

 		if err := clientBasedGetter(i); err != nil {
 			// Here at the top of the loop is the correct place
-			// to Add an error using fault.
-			errs.Add(err)
+			// to aggregate the error using fault.
+			// Side note: technically, you should use a local bus
+			// here (see below) instead of errs.
+			errs.AddRecoverable(err)
 		}
 	}

-	// Iteration should exit anytime the primary error in fault is
-	// non-nil. fault.Errors does not expose the failFast flag
-	// directly. Instead, errors from Add() will automatically
-	// promote to the Err() value. Therefore, loops only ned to
-	// check the errs.Err(). If it is non-nil, then the loop should break.
+	// Iteration should exit anytime the fault failure is non-nil.
+	// fault.Bus does not expose the failFast flag directly. Instead,
+	// when failFast is true, errors from AddRecoverable() automatically
+	// promote to the Failure() spot. Recoverable handling only needs to
+	// check the errs.Failure(). If it is non-nil, then the loop should break.
 	for i := range items {
-		if errs.Err() != nil {
-			// if failFast == true errs.Add() was called,
+		if errs.Failure() != nil {
+			// if failFast == true errs.AddRecoverable() was called,
 			// we'll catch the error here.
 			break
 		}

 		if err := getIthItem(i); err != nil {
-			errs.Add(err)
+			errs.AddRecoverable(err)
 		}
 	}
 }

-// ExampleErrors_Err describes retrieving the non-recoverable error.
-func ExampleErrors_Err() {
+// ExampleBus_Failure describes retrieving the non-recoverable error.
+func ExampleBus_Failure() {
 	errs := fault.New(false)
 	errs.Fail(errors.New("catastrophe"))

-	// Err() returns the primary failure.
-	err := errs.Err()
+	// Failure() returns the primary failure.
+	err := errs.Failure()
 	fmt.Println(err)

 	// if multiple Failures occur, each one after the first gets
-	// added to the Errs slice.
+	// added to the Recoverable slice as an overflow measure.
 	errs.Fail(errors.New("another catastrophe"))
-	errSl := errs.Errs()
+	errSl := errs.Recovered()

 	for _, e := range errSl {
 		fmt.Println(e)
 	}

-	// If Err() is nil, then you can assume the operation completed.
+	// If Failure() is nil, then you can assume the operation completed.
 	// A complete operation is not necessarily an error-free operation.
-	// Recoverable errors may still have been added using Add(err).
-	//
-	// Even if Err() is nil, Errs() can be non-empty.
+	// Recoverable errors may still have been added using AddRecoverable(err).
 	// Make sure you check both.

-	errs = fault.New(true)
-
-	// If failFast is set to true, then the first error Add()ed gets
+	// If failFast is set to true, then the first recoerable error Added gets
 	// promoted to the Err() position.
-	errs.Add(errors.New("not catastrophic, but still becomes the Err()"))
-	err = errs.Err()
+	errs = fault.New(true)
+	errs.AddRecoverable(errors.New("not catastrophic, but still becomes the Failure()"))
+	err = errs.Failure()
 	fmt.Println(err)

 	// Output: catastrophe
 	// another catastrophe
-	// not catastrophic, but still becomes the Err()
+	// not catastrophic, but still becomes the Failure()
 }

-// ExampleErrors_Errs describes retrieving individual errors.
-func ExampleErrors_Errs() {
+// ExampleBus_Recovered describes the errors that processing was able to
+// recover from and continue.
+func ExampleErrors_Recovered() {
 	errs := fault.New(false)
-	errs.Add(errors.New("not catastrophic"))
-	errs.Add(errors.New("something unwanted"))
+	errs.AddRecoverable(errors.New("not catastrophic"))
+	errs.AddRecoverable(errors.New("something unwanted"))

-	// Errs() gets the slice of all recoverable errors Add()ed during
-	// the run, but which did not force the process to exit.
+	// Recovered() gets the slice of all recoverable errors added during
+	// the run, but which did not cause a failure.
 	//
-	// Errs() only needs to be investigated by the end user at the
-	// conclusion of an operation. Checking Errs() within lower-
-	// layer code is a smell. Funcs should return a standard error,
-	// or errs.Err(), if they need upstream handlers to handle the errors.
-	errSl := errs.Errs()
+	// Recovered() should never be investigated during lower level processing.
+	// Implementation only ever needs to check Failure(). If an error didn't
+	// promote to the Failure slot, then it should be ignored.
+	//
+	// The end user, at the conclusion of an operation, is the intended recipient
+	// of the Recovered error slice. After returning to the interface layer
+	// (the CLI or SDK), it's the job of the end user at that location to
+	// iterate through those errors and record them as wanted.
+	errSl := errs.Recovered()
 	for _, err := range errSl {
 		fmt.Println(err)
 	}

-	// One or more errors in errs.Errs() does not necessarily mean the
-	// process failed. You can have non-zero Errs() but a nil Err().
-	if errs.Err() == nil {
-		fmt.Println("Err() is nil")
+	// One or more errors in errs.Recovered() does not necessarily mean the
+	// process failed. You can have non-zero Recovered() but a nil Failure().
+	if errs.Failure() == nil {
+		fmt.Println("Failure() is nil")
 	}

-	// If Errs() is nil, then you can assume that no recoverable or
-	// iteration-based errors occurred. But that does not necessarily
+	// Inversely, if Recovered() is nil, then you can assume that no recoverable
+	// or iteration-based errors occurred. But that does not necessarily
 	// mean the operation was able to complete.
 	//
-	// Even if Errs() contains zero items, Err() can be non-nil.
+	// Even if Recovered() contains zero items, Err() can be non-nil.
 	// Make sure you check both.

 	// Output: not catastrophic
 	// something unwanted
-	// Err() is nil
+	// Failure() is nil
 }

-func ExampleErrors_Tracker() {
+func ExampleBus_Local() {
 	// It is common for Corso to run operations in parallel,
 	// and for iterations to be nested within iterations. To
 	// avoid mistakenly returning an error that was sourced
 	// from some other async iteration, recoverable instances
-	// are aggrgated into a Tracker.
+	// are aggrgated into a Local.
 	errs := fault.New(false)
-	trkr := errs.Tracker()
+	el := errs.Local()

 	err := func() error {
 		for i := range items {
-			if trkr.Err() != nil {
+			if el.Failure() != nil {
 				break
 			}

 			if err := getIthItem(i); err != nil {
-				// instead of calling errs.Add(err), we call the
-				// trackers Add method. The error will still get
-				// added to the errs.Errs() set. But if this err
-				// causes the run to fail, only this tracker treats
+				// instead of calling errs.AddRecoverable(err), we call the
+				// local bus's Add method. The error will still get
+				// added to the errs.Recovered() set. But if this err
+				// causes the run to fail, only this local bus treats
 				// it as the causal failure.
-				trkr.Add(err)
+				el.AddRecoverable(err)
 			}
 		}

-		return trkr.Err()
+		return el.Failure()
 	}()
 	if err != nil {
-		// handle the Err() that appeared in the tracker
-		fmt.Println("err occurred", errs.Err())
+		// handle the Failure() that appeared in the local bus.
+		fmt.Println("failure occurred", errs.Failure())
 	}
 }

-// ExampleErrorsE2e showcases a more complex integration.
-func Example_errors_e2e() {
+// ExampleE2e showcases a more complex integration.
+func Example_e2e() {
 	oper := newOperation()

 	// imagine that we're a user, calling into corso SDK.
 	// (fake funcs used here to minimize example bloat)
 	//
 	// The operation is our controller, we expect it to
-	// generate a new fault.Errors when constructed, and
+	// generate a new fault.Bus when constructed, and
 	// to return that struct when we call Run()
 	errs := oper.Run()

 	// Let's investigate what went on inside. Since we're at
-	// the top of our controller, and returning a fault.Errors,
+	// the top of our controller, and returning a fault.Bus,
 	// all the error handlers set the Fail() case.

 	/* Run() */
-	func() *fault.Errors {
+	func() *fault.Bus {
 		if err := connectClient(); err != nil {
 			// Fail() here; we're top level in the controller
 			// and this is a non-recoverable issue
@@ -297,12 +305,13 @@ func Example_errors_e2e() {

 	// What about the lower level handling? storeData didn't
 	// return an error, so what's happening there?

 	/* storeData */
-	func(data []any, errs *fault.Errors) {
+	err := func(data []any, errs *fault.Bus) error {
 		// this is downstream in our code somewhere
 		storer := func(a any) error {
 			if err := dependencyCall(); err != nil {
-				// we're not passing in or calling fault.Errors here,
+				// we're not passing in or calling fault.Bus here,
 				// because this isn't the iteration handler, it's just
 				// a regular error.
 				return errors.Wrap(err, "dependency")

@@ -311,36 +320,48 @@ func Example_errors_e2e() {
 			return nil
 		}

+		el := errs.Local()
+
 		for _, d := range data {
-			if errs.Err() != nil {
+			if el.Failure() != nil {
 				break
 			}

 			if err := storer(d); err != nil {
 				// Since we're at the top of the iteration, we need
-				// to add each error to the fault.Errors struct.
-				errs.Add(err)
+				// to add each error to the fault.localBus struct.
+				el.AddRecoverable(err)
 			}
 		}
-	}(nil, nil)

-	// then at the end of the oper.Run, we investigate the results.
-	if errs.Err() != nil {
-		// handle the primary error
-		fmt.Println("err occurred", errs.Err())
+		// at the end of the func, we need to return local.Failure()
+		// just in case the local bus promoted an error to the failure
+		// position. If we don't return it like normal error handling,
+		// then we'll lose scope of that error.
+		return el.Failure()
+	}(nil, nil)
+	if err != nil {
+		fmt.Println("errored", err)
 	}

-	for _, err := range errs.Errs() {
+	// At the end of the oper.Run, when returning to the interface
+	// layer, we investigate the results.
+	if errs.Failure() != nil {
+		// handle the primary error
+		fmt.Println("err occurred", errs.Failure())
+	}
+
+	for _, err := range errs.Recovered() {
 		// handle each recoverable error
 		fmt.Println("recoverable err occurred", err)
 	}
 }

-// ExampleErrors_Err_return showcases when to return err or nil vs errs.Err()
-func ExampleErrors_Err_return() {
-	// The general rule of thumb is to always handle the error directly
-	// by returning err, or nil, or any variety of extension (wrap,
-	// stack, clues, etc).
+// ExampleBus_Failure_return showcases when to return an error or
+// nil vs errs.Failure() vs *fault.Bus
+func ExampleErrors_Failure_return() {
+	// The general rule of thumb is stick to standard golang error
+	// handling whenever possible.
 	fn := func() error {
 		if err := dependency.do(); err != nil {
 			return errors.Wrap(err, "direct")

@@ -352,26 +373,47 @@ func ExampleErrors_Err_return() {
 		fmt.Println(err)
 	}

-	// The exception is if you're handling recoverable errors. Those
-	// funcs should always return errs.Err(), in case a recoverable
-	// error happened on the last round of iteration.
-	fn2 := func(todo []string, errs *fault.Errors) error {
+	// The first exception is if you're handling recoverable errors. Recoverable
+	// error handling should create a local bus instance, and return localBus.Failure()
+	// so that the immediate upstream caller can be made aware of the current failure.
+	fn2 := func(todo []string, errs *fault.Bus) error {
 		for range todo {
-			if errs.Err() != nil {
-				return errs.Err()
+			if errs.Failure() != nil {
+				return errs.Failure()
 			}

 			if err := dependency.do(); err != nil {
-				errs.Add(errors.Wrap(err, "recoverable"))
+				errs.AddRecoverable(errors.Wrap(err, "recoverable"))
 			}
 		}

-		return errs.Err()
+		return errs.Failure()
 	}
 	if err := fn2([]string{"a"}, fault.New(true)); err != nil {
 		fmt.Println(err)
 	}

+	// The second exception is if you're returning at the interface layer.
+	// In that case, you're expected to return the fault.Bus itself, so that
+	// callers can review the fault data.
+	operationFn := func(errs *fault.Bus) *fault.Bus {
+		if _, err := getData(); err != nil {
+			return errs.Fail(err)
+		}
+
+		return errs
+	}
+
+	fbus := operationFn(fault.New(true))
+
+	if fbus.Failure() != nil {
+		fmt.Println("failure", fbus.Failure())
+	}
+
+	for _, err := range fbus.Recovered() {
+		fmt.Println("recovered", err)
+	}
+
 	// Output: direct: caught one
 	// recoverable: caught one
 }
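Before moving on to fault.go itself, here is a short runnable sketch tying the renamed mechanics together, including the failFast promotion and the overflow behavior documented in the examples above. Only the `fault` API is real; the import path is assumed from the repo layout.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/alcionai/corso/src/pkg/fault" // assumed import path
)

func main() {
	// failFast: the first recoverable error promotes to the failure slot
	errs := fault.New(true)

	errs.AddRecoverable(errors.New("bad item"))

	// the recoverable error has been promoted, so loops checking
	// Failure() will now exit early
	fmt.Println(errs.Failure())

	// a second failure doesn't overwrite the first; per setFailure, it
	// overflows into the recoverable slice so nothing is lost
	errs.Fail(errors.New("hard stop"))
	fmt.Println(errs.Failure()) // still the first error
}
```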
@ -6,21 +6,21 @@ import (
|
|||||||
"golang.org/x/exp/slices"
|
"golang.org/x/exp/slices"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Errors struct {
|
type Bus struct {
|
||||||
mu *sync.Mutex
|
mu *sync.Mutex
|
||||||
|
|
||||||
// err identifies non-recoverable errors. This includes
|
// Failure probably identifies errors that were added to the bus
|
||||||
// non-start cases (ex: cannot connect to client), hard-
|
// or localBus via AddRecoverable, but which were promoted
|
||||||
// stop issues (ex: credentials expired) or conscious exit
|
// to the failure position due to failFast=true configuration.
|
||||||
// cases (ex: iteration error + failFast config).
|
// Alternatively, the process controller might have set failure
|
||||||
err error
|
// by calling Fail(err).
|
||||||
|
failure error
|
||||||
|
|
||||||
// errs is the accumulation of recoverable or iterated
|
// recoverable is the accumulation of recoverable errors.
|
||||||
// errors. Eg: if a process is retrieving N items, and
|
// Eg: if a process is retrieving N items, and 1 of the
|
||||||
// 1 of the items fails to be retrieved, but the rest of
|
// items fails to be retrieved, but the rest of them succeed,
|
||||||
// them succeed, we'd expect to see 1 error added to this
|
// we'd expect to see 1 error added to this slice.
|
||||||
// slice.
|
recoverable []error
|
||||||
errs []error
|
|
||||||
|
|
||||||
// if failFast is true, the first errs addition will
|
// if failFast is true, the first errs addition will
|
||||||
// get promoted to the err value. This signifies a
|
// get promoted to the err value. This signifies a
|
||||||
@@ -29,51 +29,71 @@ type Errors struct {
 	failFast bool
 }
 
-// ErrorsData provides the errors data alone, without sync
+// Errors provides the errors data alone, without sync
 // controls, allowing the data to be persisted.
-type ErrorsData struct {
-	Err      error   `json:"-"`
-	Errs     []error `json:"-"`
-	FailFast bool    `json:"failFast"`
+type Errors struct {
+	// Failure identifies a non-recoverable error. This includes
+	// non-start cases (ex: cannot connect to client), hard-
+	// stop issues (ex: credentials expired) or conscious exit
+	// cases (ex: iteration error + failFast config).
+	Failure error `json:"failure"`
+
+	// Recovered errors accumulate through a runtime under
+	// best-effort processing conditions. They imply that an
+	// error occurred, but the process was able to move on and
+	// complete afterwards.
+	// Eg: if a process is retrieving N items, and 1 of the
+	// items fails to be retrieved, but the rest of them succeed,
+	// we'd expect to see 1 error added to this slice.
+	Recovered []error `json:"-"`
+
+	// If FailFast is true, then the first Recoverable error will
+	// promote to the Failure spot, causing processing to exit.
+	FailFast bool `json:"failFast"`
 }
 
 // New constructs a new error with default values in place.
-func New(failFast bool) *Errors {
-	return &Errors{
+func New(failFast bool) *Bus {
+	return &Bus{
 		mu:       &sync.Mutex{},
-		errs:     []error{},
+		recoverable: []error{},
 		failFast: failFast,
 	}
 }
 
-// Err returns the primary error. If not nil, this
+// Failure returns the primary error. If not nil, this
 // indicates the operation exited prior to completion.
-func (e *Errors) Err() error {
-	return e.err
+func (e *Bus) Failure() error {
+	return e.failure
 }
 
-// Errs returns the slice of recoverable and
-// iterated errors.
-func (e *Errors) Errs() []error {
-	return e.errs
+// Recovered returns the slice of errors that occurred in
+// recoverable points of processing. This is often during
+// iteration where a single failure (ex: retrieving an item),
+// doesn't require the entire process to end.
+func (e *Bus) Recovered() []error {
+	return e.recoverable
 }
 
-// Data returns the plain set of error data
-// without any sync properties.
-func (e *Errors) Data() ErrorsData {
-	return ErrorsData{
-		Err:      e.err,
-		Errs:     slices.Clone(e.errs),
+// Errors returns the plain record of errors that were aggregated
+// within a fault Bus.
+func (e *Bus) Errors() Errors {
+	return Errors{
+		Failure:   e.failure,
+		Recovered: slices.Clone(e.recoverable),
 		FailFast: e.failFast,
 	}
 }
 
-// TODO: introduce Failer interface
-
-// Fail sets the non-recoverable error (ie: errors.err)
-// in the errors struct. If a non-recoverable error is
-// already present, the error gets added to the errs slice.
-func (e *Errors) Fail(err error) *Errors {
+// Fail sets the non-recoverable error (ie: bus.failure)
+// in the bus. If a failure error is already present,
+// the error gets added to the recoverable slice for
+// purposes of tracking.
+//
+// TODO: Return Data, not Bus. The consumers of a failure
+// should care about the state of data, not the communication
+// pattern.
+func (e *Bus) Fail(err error) *Bus {
 	if err == nil {
 		return e
 	}
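This split is the core of the rename: Bus is the synchronized collector, Errors is the plain snapshot meant for persistence. A sketch of moving from one to the other, with the same assumed import path as before:

package main

import (
	"encoding/json"
	"errors"
	"fmt"

	"github.com/alcionai/corso/src/pkg/fault" // assumed import path
)

func main() {
	errs := fault.New(false)
	errs.AddRecoverable(errors.New("one item failed"))

	// Errors() clones the recoverable slice, so the snapshot is
	// safe to hold while the bus keeps collecting.
	data := errs.Errors()

	// Recovered is tagged `json:"-"` and drops out of the payload;
	// Failure and FailFast carry json tags.
	bs, err := json.Marshal(data)
	if err != nil {
		panic(err)
	}

	fmt.Println(string(bs))
}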
@@ -81,28 +101,33 @@ func (e *Errors) Fail(err error) *Errors {
 	e.mu.Lock()
 	defer e.mu.Unlock()
 
-	return e.setErr(err)
+	return e.setFailure(err)
 }
 
-// setErr handles setting errors.err. Sync locking gets
+// setFailure handles setting bus.failure. Sync locking gets
 // handled upstream of this call.
-func (e *Errors) setErr(err error) *Errors {
-	if e.err == nil {
-		e.err = err
+func (e *Bus) setFailure(err error) *Bus {
+	if e.failure == nil {
+		e.failure = err
 		return e
 	}
 
-	e.errs = append(e.errs, err)
+	// technically not a recoverable error: we're using the
+	// recoverable slice as an overflow container here to
+	// ensure everything is tracked.
+	e.recoverable = append(e.recoverable, err)
 
 	return e
 }
 
-// Add appends the error to the slice of recoverable and
-// iterated errors (ie: errors.errs). If failFast is true,
-// the first Added error will get copied to errors.err,
-// causing the errors struct to identify as non-recoverably
-// failed.
-func (e *Errors) Add(err error) *Errors {
+// AddRecoverable appends the error to the slice of recoverable
+// errors (ie: bus.recoverable). If failFast is true, the first
+// added error will get copied to bus.failure, causing the bus
+// to identify as non-recoverably failed.
+//
+// TODO: nil return, not Bus, since we don't want people to return
+// from errors.AddRecoverable().
+func (e *Bus) AddRecoverable(err error) *Bus {
 	if err == nil {
 		return e
 	}
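A sketch of the overflow behavior setFailure describes: only the first hard error occupies the failure slot, and later ones land in the recoverable slice so nothing is dropped (import path assumed, errors illustrative):

package main

import (
	"errors"
	"fmt"

	"github.com/alcionai/corso/src/pkg/fault" // assumed import path
)

func main() {
	errs := fault.New(false)

	errs.Fail(errors.New("credentials expired"))
	errs.Fail(errors.New("connection dropped"))

	fmt.Println(errs.Failure())        // credentials expired
	fmt.Println(len(errs.Recovered())) // 1: the second failure, kept for tracking
}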
@@ -110,44 +135,44 @@ func (e *Errors) Add(err error) *Errors {
 	e.mu.Lock()
 	defer e.mu.Unlock()
 
-	return e.addErr(err)
+	return e.addRecoverableErr(err)
 }
 
 // addErr handles adding errors to errors.errs. Sync locking
 // gets handled upstream of this call.
-func (e *Errors) addErr(err error) *Errors {
-	if e.err == nil && e.failFast {
-		e.setErr(err)
+func (e *Bus) addRecoverableErr(err error) *Bus {
+	if e.failure == nil && e.failFast {
+		e.setFailure(err)
 	}
 
-	e.errs = append(e.errs, err)
+	e.recoverable = append(e.recoverable, err)
 
 	return e
 }
 
 // ---------------------------------------------------------------------------
-// Iteration Tracker
+// Local aggregator
 // ---------------------------------------------------------------------------
 
-// Tracker constructs a new errors tracker for aggregating errors
-// in a single iteration loop. Trackers shouldn't be passed down
-// to other funcs, and the function that spawned the tracker should
-// always return `tracker.Err()` to ensure that hard failures are
-// propagated upstream.
-func (e *Errors) Tracker() *tracker {
-	return &tracker{
+// Local constructs a new local bus to handle error aggregation in a
+// constrained scope. Local busses shouldn't be passed down to other
+// funcs, and the function that spawned the local bus should always
+// return `local.Failure()` to ensure that hard failures are propagated
+// back upstream.
+func (e *Bus) Local() *localBus {
+	return &localBus{
 		mu:   &sync.Mutex{},
-		errs: e,
+		bus:  e,
 	}
 }
 
-type tracker struct {
+type localBus struct {
 	mu      *sync.Mutex
-	errs    *Errors
+	bus     *Bus
 	current error
 }
 
-func (e *tracker) Add(err error) {
+func (e *localBus) AddRecoverable(err error) {
 	if err == nil {
 		return
 	}
@@ -155,18 +180,18 @@ func (e *tracker) Add(err error) {
 	e.mu.Lock()
 	defer e.mu.Unlock()
 
-	if e.current == nil && e.errs.failFast {
+	if e.current == nil && e.bus.failFast {
 		e.current = err
 	}
 
-	e.errs.Add(err)
+	e.bus.AddRecoverable(err)
 }
 
-// Err returns the primary error in the tracker. Will be nil if the
-// original Errors is set to bestEffort handling. Does not return the
-// underlying Errors.Err(). Should be called as the return value of
-// any func which created a new tracker.
-func (e *tracker) Err() error {
+// Failure returns the failure that happened within the local bus.
+// It does not return the underlying bus.Failure(), only the failure
+// that was recorded within the local bus instance. This error should
+// get returned by any func which created a local bus.
+func (e *localBus) Failure() error {
 	return e.current
 }
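Taken together, the localBus comments above prescribe a specific shape for iterating funcs. A sketch under those rules, where processItems and retrieveItem are hypothetical stand-ins:

package process

import "github.com/alcionai/corso/src/pkg/fault" // assumed import path

// processItems records per-item errors on a local bus and, per the
// guidance above, returns only local.Failure() to its caller.
func processItems(items []string, errs *fault.Bus) error {
	local := errs.Local()

	for _, item := range items {
		if err := retrieveItem(item); err != nil {
			// recorded on the parent bus; under failFast this also
			// becomes this local bus's failure
			local.AddRecoverable(err)
		}
	}

	return local.Failure()
}

// retrieveItem is a hypothetical stand-in for real per-item work.
func retrieveItem(item string) error { return nil }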
@@ -75,16 +75,16 @@ func (suite *FaultErrorsUnitSuite) TestErr() {
 		suite.T().Run(test.name, func(t *testing.T) {
 			n := fault.New(test.failFast)
 			require.NotNil(t, n)
-			require.NoError(t, n.Err())
-			require.Empty(t, n.Errs())
+			require.NoError(t, n.Failure())
+			require.Empty(t, n.Recovered())
 
 			e := n.Fail(test.fail)
 			require.NotNil(t, e)
 
-			e = n.Add(test.add)
+			e = n.AddRecoverable(test.add)
 			require.NotNil(t, e)
 
-			test.expect(t, n.Err())
+			test.expect(t, n.Failure())
 		})
 	}
 }

@@ -94,16 +94,16 @@ func (suite *FaultErrorsUnitSuite) TestFail() {
 
 	n := fault.New(false)
 	require.NotNil(t, n)
-	require.NoError(t, n.Err())
-	require.Empty(t, n.Errs())
+	require.NoError(t, n.Failure())
+	require.Empty(t, n.Recovered())
 
 	n.Fail(assert.AnError)
-	assert.Error(t, n.Err())
-	assert.Empty(t, n.Errs())
+	assert.Error(t, n.Failure())
+	assert.Empty(t, n.Recovered())
 
 	n.Fail(assert.AnError)
-	assert.Error(t, n.Err())
-	assert.NotEmpty(t, n.Errs())
+	assert.Error(t, n.Failure())
+	assert.NotEmpty(t, n.Recovered())
 }
 
 func (suite *FaultErrorsUnitSuite) TestErrs() {

@@ -154,10 +154,10 @@ func (suite *FaultErrorsUnitSuite) TestErrs() {
 			e := n.Fail(test.fail)
 			require.NotNil(t, e)
 
-			e = n.Add(test.add)
+			e = n.AddRecoverable(test.add)
 			require.NotNil(t, e)
 
-			test.expect(t, n.Errs())
+			test.expect(t, n.Recovered())
 		})
 	}
 }
@@ -168,16 +168,16 @@ func (suite *FaultErrorsUnitSuite) TestAdd() {
 	n := fault.New(true)
 	require.NotNil(t, n)
 
-	n.Add(assert.AnError)
-	assert.Error(t, n.Err())
-	assert.Len(t, n.Errs(), 1)
+	n.AddRecoverable(assert.AnError)
+	assert.Error(t, n.Failure())
+	assert.Len(t, n.Recovered(), 1)
 
-	n.Add(assert.AnError)
-	assert.Error(t, n.Err())
-	assert.Len(t, n.Errs(), 2)
+	n.AddRecoverable(assert.AnError)
+	assert.Error(t, n.Failure())
+	assert.Len(t, n.Recovered(), 2)
 }
 
-func (suite *FaultErrorsUnitSuite) TestData() {
+func (suite *FaultErrorsUnitSuite) TestErrors() {
 	t := suite.T()
 
 	// not fail-fast

@@ -185,12 +185,12 @@ func (suite *FaultErrorsUnitSuite) TestData() {
 	require.NotNil(t, n)
 
 	n.Fail(errors.New("fail"))
-	n.Add(errors.New("1"))
-	n.Add(errors.New("2"))
+	n.AddRecoverable(errors.New("1"))
+	n.AddRecoverable(errors.New("2"))
 
-	d := n.Data()
-	assert.Equal(t, n.Err(), d.Err)
-	assert.ElementsMatch(t, n.Errs(), d.Errs)
+	d := n.Errors()
+	assert.Equal(t, n.Failure(), d.Failure)
+	assert.ElementsMatch(t, n.Recovered(), d.Recovered)
 	assert.False(t, d.FailFast)
 
 	// fail-fast

@@ -198,12 +198,12 @@ func (suite *FaultErrorsUnitSuite) TestData() {
 	require.NotNil(t, n)
 
 	n.Fail(errors.New("fail"))
-	n.Add(errors.New("1"))
-	n.Add(errors.New("2"))
+	n.AddRecoverable(errors.New("1"))
+	n.AddRecoverable(errors.New("2"))
 
-	d = n.Data()
-	assert.Equal(t, n.Err(), d.Err)
-	assert.ElementsMatch(t, n.Errs(), d.Errs)
+	d = n.Errors()
+	assert.Equal(t, n.Failure(), d.Failure)
+	assert.ElementsMatch(t, n.Recovered(), d.Recovered)
 	assert.True(t, d.FailFast)
 }
@@ -214,17 +214,13 @@ func (suite *FaultErrorsUnitSuite) TestMarshalUnmarshal() {
 	n := fault.New(false)
 	require.NotNil(t, n)
 
-	n.Add(errors.New("1"))
-	n.Add(errors.New("2"))
+	n.AddRecoverable(errors.New("1"))
+	n.AddRecoverable(errors.New("2"))
 
-	data := n.Data()
+	bs, err := json.Marshal(n.Errors())
 
-	jsonStr, err := json.Marshal(data)
 	require.NoError(t, err)
 
-	um := fault.ErrorsData{}
+	err = json.Unmarshal(bs, &fault.Errors{})
 
-	err = json.Unmarshal(jsonStr, &um)
 	require.NoError(t, err)
 }
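The round trip the rewritten test performs, spelled out as a runnable sketch (import path assumed; note the test only adds recoverable errors, so the marshaled failure field is null and unmarshals cleanly into the error-typed field):

package main

import (
	"encoding/json"
	"errors"
	"fmt"

	"github.com/alcionai/corso/src/pkg/fault" // assumed import path
)

func main() {
	n := fault.New(false)
	n.AddRecoverable(errors.New("1"))
	n.AddRecoverable(errors.New("2"))

	bs, err := json.Marshal(n.Errors())
	if err != nil {
		panic(err)
	}

	// Recovered is tagged `json:"-"`, so it is absent from the payload.
	fmt.Println(string(bs))

	um := fault.Errors{}
	if err := json.Unmarshal(bs, &um); err != nil {
		panic(err)
	}
}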
@@ -246,7 +242,7 @@ func (suite *FaultErrorsUnitSuite) TestUnmarshalLegacy() {
 
 	t.Logf("jsonStr is %s\n", jsonStr)
 
-	um := fault.ErrorsData{}
+	um := fault.Errors{}
 
 	err = json.Unmarshal(jsonStr, &um)
 	require.NoError(t, err)

@@ -255,25 +251,25 @@ func (suite *FaultErrorsUnitSuite) TestUnmarshalLegacy() {
 func (suite *FaultErrorsUnitSuite) TestTracker() {
 	t := suite.T()
 
-	be := fault.New(false)
+	eb := fault.New(false)
 
-	ba := be.Tracker()
-	assert.NoError(t, ba.Err())
-	assert.Empty(t, be.Errs())
+	lb := eb.Local()
+	assert.NoError(t, lb.Failure())
+	assert.Empty(t, eb.Recovered())
 
-	ba.Add(assert.AnError)
-	assert.NoError(t, ba.Err())
-	assert.NoError(t, be.Err())
-	assert.NotEmpty(t, be.Errs())
+	lb.AddRecoverable(assert.AnError)
+	assert.NoError(t, lb.Failure())
+	assert.NoError(t, eb.Failure())
+	assert.NotEmpty(t, eb.Recovered())
 
-	fe := fault.New(true)
+	ebt := fault.New(true)
 
-	fa := fe.Tracker()
-	assert.NoError(t, fa.Err())
-	assert.Empty(t, fe.Errs())
+	lbt := ebt.Local()
+	assert.NoError(t, lbt.Failure())
+	assert.Empty(t, ebt.Recovered())
 
-	fa.Add(assert.AnError)
-	assert.Error(t, fa.Err())
-	assert.Error(t, fe.Err())
-	assert.NotEmpty(t, fe.Errs())
+	lbt.AddRecoverable(assert.AnError)
+	assert.Error(t, lbt.Failure())
+	assert.Error(t, ebt.Failure())
+	assert.NotEmpty(t, ebt.Recovered())
 }
@@ -185,8 +185,8 @@ func runBackupLoadTest(
 		assert.Less(t, 0, b.Results.ItemsWritten, "items written")
 		assert.Less(t, int64(0), b.Results.BytesUploaded, "bytes uploaded")
 		assert.Equal(t, len(users), b.Results.ResourceOwners, "resource owners")
-		assert.NoError(t, b.Errors.Err(), "non-recoverable error")
-		assert.Empty(t, b.Errors.Errs(), "recoverable errors")
+		assert.NoError(t, b.Errors.Failure(), "non-recoverable error")
+		assert.Empty(t, b.Errors.Recovered(), "recoverable errors")
 		assert.NoError(t, b.Results.ReadErrors, "read errors")
 		assert.NoError(t, b.Results.WriteErrors, "write errors")
 	})

@@ -242,7 +242,7 @@ func runBackupDetailsLoadTest(
 
 	t.Run("backup_details_"+name, func(t *testing.T) {
 		var (
-			errs *fault.Errors
+			errs *fault.Bus
 			b    *backup.Backup
 			ds   *details.Details
 			labels = pprof.Labels("details_load_test", name)

@@ -252,8 +252,8 @@ func runBackupDetailsLoadTest(
 			ds, b, errs = r.BackupDetails(ctx, backupID)
 		})
 
-		require.NoError(t, errs.Err(), "retrieving details in backup "+backupID)
-		require.Empty(t, errs.Errs(), "retrieving details in backup "+backupID)
+		require.NoError(t, errs.Failure(), "retrieving details in backup "+backupID)
+		require.Empty(t, errs.Recovered(), "retrieving details in backup "+backupID)
 		require.NotNil(t, ds, "backup details must exist")
 		require.NotNil(t, b, "backup must exist")

@@ -294,8 +294,8 @@ func doRestoreLoadTest(
 		assert.Less(t, 0, r.Results.ItemsRead, "items read")
 		assert.Less(t, 0, r.Results.ItemsWritten, "items written")
 		assert.Equal(t, len(users), r.Results.ResourceOwners, "resource owners")
-		assert.NoError(t, r.Errors.Err(), "non-recoverable error")
-		assert.Empty(t, r.Errors.Errs(), "recoverable errors")
+		assert.NoError(t, r.Errors.Failure(), "non-recoverable error")
+		assert.Empty(t, r.Errors.Recovered(), "recoverable errors")
 		assert.NoError(t, r.Results.ReadErrors, "read errors")
 		assert.NoError(t, r.Results.WriteErrors, "write errors")
 		assert.Equal(t, expectItemCount, r.Results.ItemsWritten, "backup and restore wrote the same count of items")
@@ -35,12 +35,12 @@ var ErrorRepoAlreadyExists = errors.New("a repository was already initialized wi
 // repository.
 type BackupGetter interface {
 	Backup(ctx context.Context, id model.StableID) (*backup.Backup, error)
-	Backups(ctx context.Context, ids []model.StableID) ([]*backup.Backup, *fault.Errors)
+	Backups(ctx context.Context, ids []model.StableID) ([]*backup.Backup, *fault.Bus)
 	BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error)
 	BackupDetails(
 		ctx context.Context,
 		backupID string,
-	) (*details.Details, *backup.Backup, *fault.Errors)
+	) (*details.Details, *backup.Backup, *fault.Bus)
 }
 
 type Repository interface {

@@ -314,7 +314,7 @@ func (r repository) Backup(ctx context.Context, id model.StableID) (*backup.Back
 
 // BackupsByID lists backups by ID. Returns as many backups as possible with
 // errors for the backups it was unable to retrieve.
-func (r repository) Backups(ctx context.Context, ids []model.StableID) ([]*backup.Backup, *fault.Errors) {
+func (r repository) Backups(ctx context.Context, ids []model.StableID) ([]*backup.Backup, *fault.Bus) {
 	var (
 		bups []*backup.Backup
 		errs = fault.New(false)

@@ -324,7 +324,7 @@ func (r repository) Backups(ctx context.Context, ids []model.StableID) ([]*backu
 	for _, id := range ids {
 		b, err := sw.GetBackup(ctx, id)
 		if err != nil {
-			errs.Add(clues.Stack(err).With("backup_id", id))
+			errs.AddRecoverable(clues.Stack(err).With("backup_id", id))
 		}
 
 		bups = append(bups, b)

@@ -343,7 +343,7 @@ func (r repository) BackupsByTag(ctx context.Context, fs ...store.FilterOption)
 func (r repository) BackupDetails(
 	ctx context.Context,
 	backupID string,
-) (*details.Details, *backup.Backup, *fault.Errors) {
+) (*details.Details, *backup.Backup, *fault.Bus) {
 	sw := store.NewKopiaStore(r.modelStore)
 	errs := fault.New(false)
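With the signatures above, repository callers receive a *fault.Bus rather than a plain error. A sketch of the consuming side, with the wrapping message and logging as illustrative placeholders, and the corso import paths assumed from this repository's layout:

package cli

import (
	"context"
	"fmt"

	"github.com/pkg/errors"

	// assumed import paths, following this repository's layout
	"github.com/alcionai/corso/src/internal/model"
	"github.com/alcionai/corso/src/pkg/backup"
	"github.com/alcionai/corso/src/pkg/repository"
)

// listBackups separates the hard failure from the per-backup
// recoverable errors that Backups collects on its bus.
func listBackups(
	ctx context.Context,
	r repository.BackupGetter,
	ids []model.StableID,
) ([]*backup.Backup, error) {
	bups, ferrs := r.Backups(ctx, ids)
	if ferrs.Failure() != nil {
		return nil, errors.Wrap(ferrs.Failure(), "retrieving backups")
	}

	// stand-in for real logging of recoverable errors
	for _, err := range ferrs.Recovered() {
		fmt.Printf("recoverable: %v\n", err)
	}

	return bups, nil
}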
@@ -719,7 +719,7 @@ func (s ExchangeScope) setDefaults() {
 func (s exchange) Reduce(
 	ctx context.Context,
 	deets *details.Details,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) *details.Details {
 	return reduce[ExchangeScope](
 		ctx,

@@ -498,7 +498,7 @@ func (s OneDriveScope) DiscreteCopy(user string) OneDriveScope {
 func (s oneDrive) Reduce(
 	ctx context.Context,
 	deets *details.Details,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) *details.Details {
 	return reduce[OneDriveScope](
 		ctx,

@@ -288,7 +288,7 @@ func reduce[T scopeT, C categoryT](
 	deets *details.Details,
 	s Selector,
 	dataCategories map[path.CategoryType]C,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) *details.Details {
 	ctx, end := D.Span(ctx, "selectors:reduce")
 	defer end()

@@ -314,7 +314,7 @@ func reduce[T scopeT, C categoryT](
 	for _, ent := range deets.Items() {
 		repoPath, err := path.FromDataLayerPath(ent.RepoRef, true)
 		if err != nil {
-			errs.Add(clues.Wrap(err, "transforming repoRef to path").WithClues(ctx))
+			errs.AddRecoverable(clues.Wrap(err, "transforming repoRef to path").WithClues(ctx))
 			continue
 		}

@@ -326,7 +326,7 @@ func reduce[T scopeT, C categoryT](
 		if len(ent.LocationRef) > 0 {
 			pb, err := path.Builder{}.SplitUnescapeAppend(ent.LocationRef)
 			if err != nil {
-				errs.Add(clues.Wrap(err, "transforming locationRef to path").WithClues(ctx))
+				errs.AddRecoverable(clues.Wrap(err, "transforming locationRef to path").WithClues(ctx))
 				continue
 			}

@@ -338,7 +338,7 @@ func reduce[T scopeT, C categoryT](
 				repoPath.Category(),
 				true)
 			if err != nil {
-				errs.Add(clues.Wrap(err, "transforming locationRef to path").WithClues(ctx))
+				errs.AddRecoverable(clues.Wrap(err, "transforming locationRef to path").WithClues(ctx))
 				continue
 			}
 		}
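Each reduce hunk above applies the same idiom: record the per-entry error as recoverable and continue the loop, letting the bus decide (via failFast) whether the run should end. The idiom in isolation, with parseEntry and its input as hypothetical stand-ins:

package process

import "github.com/alcionai/corso/src/pkg/fault" // assumed import path

type parsed struct{ ref string }

// parseEntry is a stand-in for the per-entry transform that can fail.
func parseEntry(ref string) (parsed, error) { return parsed{ref}, nil }

// parseAll keeps every entry it can and records the rest on the bus.
func parseAll(refs []string, errs *fault.Bus) []parsed {
	out := make([]parsed, 0, len(refs))

	for _, ref := range refs {
		p, err := parseEntry(ref)
		if err != nil {
			errs.AddRecoverable(err)
			continue
		}

		out = append(out, p)
	}

	return out
}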
@@ -284,7 +284,7 @@ func (suite *SelectorScopesSuite) TestReduce() {
 				dataCats,
 				errs)
 			require.NotNil(t, result)
-			require.NoError(t, errs.Err(), "no recoverable errors")
+			require.NoError(t, errs.Failure(), "no recoverable errors")
 			assert.Len(t, result.Entries, test.expectLen)
 		})
 	}
@@ -70,7 +70,7 @@ var (
 const All = "All"
 
 type Reducer interface {
-	Reduce(context.Context, *details.Details, *fault.Errors) *details.Details
+	Reduce(context.Context, *details.Details, *fault.Bus) *details.Details
 }
 
 // selectorResourceOwners aggregates all discrete path category types described

@@ -240,7 +240,7 @@ func (s Selector) PathService() path.ServiceType {
 func (s Selector) Reduce(
 	ctx context.Context,
 	deets *details.Details,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*details.Details, error) {
 	r, err := selectorAsIface[Reducer](s)
 	if err != nil {
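A sketch of driving Selector.Reduce from calling code, per the signature above. Package import paths are assumed from this repository's layout, and the hard error path covers the selector failing to resolve to a Reducer:

package cli

import (
	"context"
	"fmt"

	// assumed import paths, following this repository's layout
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/selectors"
)

func filterDetails(
	ctx context.Context,
	sel selectors.Selector,
	deets *details.Details,
) (*details.Details, error) {
	errs := fault.New(false)

	reduced, err := sel.Reduce(ctx, deets, errs)
	if err != nil {
		return nil, err
	}

	// entries that failed to transform were recorded as recoverable
	for _, recErr := range errs.Recovered() {
		fmt.Printf("skipped entry: %v\n", recErr)
	}

	return reduced, nil
}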
@@ -570,7 +570,7 @@ func (s SharePointScope) DiscreteCopy(site string) SharePointScope {
 func (s sharePoint) Reduce(
 	ctx context.Context,
 	deets *details.Details,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) *details.Details {
 	return reduce[SharePointScope](
 		ctx,
@@ -31,12 +31,12 @@ func UsersCompat(ctx context.Context, acct account.Account) ([]*User, error) {
 		return nil, err
 	}
 
-	return users, errs.Err()
+	return users, errs.Failure()
 }
 
 // Users returns a list of users in the specified M365 tenant
 // TODO: Implement paging support
-func Users(ctx context.Context, acct account.Account, errs *fault.Errors) ([]*User, error) {
+func Users(ctx context.Context, acct account.Account, errs *fault.Bus) ([]*User, error) {
 	gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users, errs)
 	if err != nil {
 		return nil, errors.Wrap(err, "initializing M365 graph connection")

@@ -61,7 +61,7 @@ func Users(ctx context.Context, acct account.Account, errs *fault.Errors) ([]*Us
 	return ret, nil
 }
 
-func UserIDs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]string, error) {
+func UserIDs(ctx context.Context, acct account.Account, errs *fault.Bus) ([]string, error) {
 	users, err := Users(ctx, acct, errs)
 	if err != nil {
 		return nil, err

@@ -77,7 +77,7 @@ func UserIDs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]s
 
 // UserPNs retrieves all user principleNames in the tenant. Principle Names
 // can be used analogous userIDs in graph API queries.
-func UserPNs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]string, error) {
+func UserPNs(ctx context.Context, acct account.Account, errs *fault.Bus) ([]string, error) {
 	users, err := Users(ctx, acct, errs)
 	if err != nil {
 		return nil, err

@@ -92,7 +92,7 @@ func UserPNs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]s
 }
 
 // SiteURLs returns a list of SharePoint site WebURLs in the specified M365 tenant
-func SiteURLs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]string, error) {
+func SiteURLs(ctx context.Context, acct account.Account, errs *fault.Bus) ([]string, error) {
 	gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites, errs)
 	if err != nil {
 		return nil, errors.Wrap(err, "initializing M365 graph connection")

@@ -102,7 +102,7 @@ func SiteURLs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]
 }
 
 // SiteURLs returns a list of SharePoint sites IDs in the specified M365 tenant
-func SiteIDs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]string, error) {
+func SiteIDs(ctx context.Context, acct account.Account, errs *fault.Bus) ([]string, error) {
 	gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites, errs)
 	if err != nil {
 		return nil, errors.Wrap(err, "initializing graph connection")
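At the top-level m365 helpers the bus is threaded through explicitly. A sketch of a caller, with import paths assumed from this repository's layout and account acquisition elided:

package cli

import (
	"context"

	// assumed import paths, following this repository's layout
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/fault"
	m365 "github.com/alcionai/corso/src/pkg/services/m365"
)

func listUserPNs(ctx context.Context, acct account.Account) ([]string, error) {
	// fail fast: stop enumerating on the first recoverable error
	errs := fault.New(true)

	pns, err := m365.UserPNs(ctx, acct, errs)
	if err != nil {
		return nil, err
	}

	// a recoverable error promoted to the failure slot also ends the run
	if errs.Failure() != nil {
		return nil, errs.Failure()
	}

	return pns, nil
}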