Compare commits

...

11 Commits

Author SHA1 Message Date
ryanfkeepers
8d52f7655a Add new graph status in prep for migration
Fault allows us to remove the error tracking from
the graph support statuses.  This PR is a first step
in a small refactor to slim down the status.  Following
PRs will replace the existing status with the new one.
2023-02-20 15:59:33 -07:00
ryanfkeepers
5593352226 fix unit tests 2023-02-20 15:28:23 -07:00
ryanfkeepers
f88cf05117 fault package funcs rename
Renaming the funcs in the fault
package to be more clear about
their purpose and behavior.  Largely
just find&replace changes, except
for fault.go and the fault examples.
2023-02-20 14:44:09 -07:00
ryanfkeepers
7b1682c68d last little onedrive completion 2023-02-19 09:05:17 -07:00
ryanfkeepers
88b5df728b add clues & fault to onedrive collections 2023-02-19 09:03:10 -07:00
ryanfkeepers
c62c246ee2 fix clues addall and withall 2023-02-19 08:31:41 -07:00
ryanfkeepers
c5b5a60d4e use tracker 2023-02-19 08:30:38 -07:00
ryanfkeepers
734e90c960 adding clues & fault to onedrive restore 2023-02-19 08:30:38 -07:00
ryanfkeepers
3edc74c170 linter fix 2023-02-19 08:26:37 -07:00
ryanfkeepers
66f734b4db watch for et errors 2023-02-19 08:26:09 -07:00
ryanfkeepers
b4a31c08dd add fault.tracker for error additions
Realized we had a race condition: in an async
runtime it's possible for an errs.Err() to be
returned by multiple functions, even though that
Err() was only sourced by one of them.  Adding a
tracker scopes the returned error to the func that
produced it, so only the error from the current
iteration is returned (see the sketch after the
commit list).
2023-02-19 08:25:02 -07:00
80 changed files with 1300 additions and 860 deletions
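
The tracker introduced in commit b4a31c08dd is what the errs.Local(), el.Failure(), and el.AddRecoverable() calls in the diffs below build on. A minimal sketch of the idea, assuming a simplified Bus of my own; the real fault package in this PR has more surface (recoverable-error lists, fail-fast construction via fault.New, item skipping) than shown here, and only the method names mirror the diff:

	package main

	import (
		"errors"
		"fmt"
		"sync"
	)

	// Bus collects errors shared across many goroutines. Without scoping,
	// any caller asking for the failure could observe an error set by a
	// different worker: the race described in the commit message.
	type Bus struct {
		mu      sync.Mutex
		failure error
	}

	// Fail records the first hard failure on the shared bus.
	func (b *Bus) Fail(err error) {
		b.mu.Lock()
		defer b.mu.Unlock()

		if b.failure == nil {
			b.failure = err
		}
	}

	// Failure returns the bus-wide failure, whoever produced it.
	func (b *Bus) Failure() error {
		b.mu.Lock()
		defer b.mu.Unlock()

		return b.failure
	}

	// Local returns a tracker whose Failure() only reports errors added
	// through this tracker, while still forwarding them to the shared bus.
	func (b *Bus) Local() *Tracker {
		return &Tracker{bus: b}
	}

	type Tracker struct {
		bus     *Bus
		mu      sync.Mutex
		current error
	}

	// AddRecoverable records the error in the local scope and escalates
	// it to the shared bus.
	func (t *Tracker) AddRecoverable(err error) {
		t.mu.Lock()
		if t.current == nil {
			t.current = err
		}
		t.mu.Unlock()

		t.bus.Fail(err)
	}

	// Failure reports only errors produced within this tracker's scope.
	func (t *Tracker) Failure() error {
		t.mu.Lock()
		defer t.mu.Unlock()

		return t.current
	}

	func main() {
		bus := &Bus{}
		el := bus.Local()

		// Another worker fails on the shared bus...
		bus.Fail(errors.New("worker A failed"))

		// ...but this scope still reports success for its own iteration.
		fmt.Println("local:", el.Failure()) // local: <nil>
		fmt.Println("bus:", bus.Failure())  // bus: worker A failed
	}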

View File

@@ -314,8 +314,8 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error {
 	bups, ferrs := r.Backups(ctx, bIDs)
 	// TODO: print/log recoverable errors
-	if ferrs.Err() != nil {
-		return Only(ctx, errors.Wrap(ferrs.Err(), "Unable to retrieve backup results from storage"))
+	if ferrs.Failure() != nil {
+		return Only(ctx, errors.Wrap(ferrs.Failure(), "Unable to retrieve backup results from storage"))
 	}
 	backup.PrintAll(ctx, bups)
@@ -492,7 +492,7 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error {
 // runDetailsExchangeCmd actually performs the lookup in backup details.
 // the fault.Errors return is always non-nil. Callers should check if
-// errs.Err() == nil.
+// errs.Failure() == nil.
 func runDetailsExchangeCmd(
 	ctx context.Context,
 	r repository.BackupGetter,
@@ -505,12 +505,12 @@ func runDetailsExchangeCmd(
 	d, _, errs := r.BackupDetails(ctx, backupID)
 	// TODO: log/track recoverable errors
-	if errs.Err() != nil {
-		if errors.Is(errs.Err(), data.ErrNotFound) {
+	if errs.Failure() != nil {
+		if errors.Is(errs.Failure(), data.ErrNotFound) {
 			return nil, errors.Errorf("No backup exists with the id %s", backupID)
 		}
-		return nil, errors.Wrap(errs.Err(), "Failed to get backup details in the repository")
+		return nil, errors.Wrap(errs.Failure(), "Failed to get backup details in the repository")
 	}
 	sel := utils.IncludeExchangeRestoreDataSelectors(opts)

View File

@@ -297,8 +297,8 @@ func (suite *PreparedBackupExchangeIntegrationSuite) SetupSuite() {
 	require.NoError(t, err, "retrieving recent backup by ID")
 	require.Equal(t, bIDs, string(b.ID), "repo backup matches results id")
 	_, b, errs := suite.repo.BackupDetails(ctx, bIDs)
-	require.NoError(t, errs.Err(), "retrieving recent backup details by ID")
-	require.Empty(t, errs.Errs(), "retrieving recent backup details by ID")
+	require.NoError(t, errs.Failure(), "retrieving recent backup details by ID")
+	require.Empty(t, errs.Recovered(), "retrieving recent backup details by ID")
 	require.Equal(t, bIDs, string(b.ID), "repo details matches results id")
 	suite.backupOps[set] = string(b.ID)
@@ -398,8 +398,8 @@ func (suite *PreparedBackupExchangeIntegrationSuite) TestExchangeDetailsCmd() {
 	// fetch the details from the repo first
 	deets, _, errs := suite.repo.BackupDetails(ctx, string(bID))
-	require.NoError(t, errs.Err())
-	require.Empty(t, errs.Errs())
+	require.NoError(t, errs.Failure())
+	require.Empty(t, errs.Recovered())
 	cmd := tester.StubRootCmd(
 		"backup", "details", "exchange",

View File

@@ -237,8 +237,8 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error {
 	bups, ferrs := r.Backups(ctx, bIDs)
 	// TODO: print/log recoverable errors
-	if ferrs.Err() != nil {
-		return Only(ctx, errors.Wrap(ferrs.Err(), "Unable to retrieve backup results from storage"))
+	if ferrs.Failure() != nil {
+		return Only(ctx, errors.Wrap(ferrs.Failure(), "Unable to retrieve backup results from storage"))
 	}
 	backup.PrintAll(ctx, bups)
@@ -384,7 +384,7 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error {
 // runDetailsOneDriveCmd actually performs the lookup in backup details.
 // the fault.Errors return is always non-nil. Callers should check if
-// errs.Err() == nil.
+// errs.Failure() == nil.
 func runDetailsOneDriveCmd(
 	ctx context.Context,
 	r repository.BackupGetter,
@@ -397,12 +397,12 @@ func runDetailsOneDriveCmd(
 	d, _, errs := r.BackupDetails(ctx, backupID)
 	// TODO: log/track recoverable errors
-	if errs.Err() != nil {
-		if errors.Is(errs.Err(), data.ErrNotFound) {
+	if errs.Failure() != nil {
+		if errors.Is(errs.Failure(), data.ErrNotFound) {
 			return nil, errors.Errorf("no backup exists with the id %s", backupID)
 		}
-		return nil, errors.Wrap(errs.Err(), "Failed to get backup details in the repository")
+		return nil, errors.Wrap(errs.Failure(), "Failed to get backup details in the repository")
 	}
 	sel := utils.IncludeOneDriveRestoreDataSelectors(opts)

View File

@@ -257,8 +257,8 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {
 	bups, ferrs := r.Backups(ctx, bIDs)
 	// TODO: print/log recoverable errors
-	if ferrs.Err() != nil {
-		return Only(ctx, errors.Wrap(ferrs.Err(), "Unable to retrieve backup results from storage"))
+	if ferrs.Failure() != nil {
+		return Only(ctx, errors.Wrap(ferrs.Failure(), "Unable to retrieve backup results from storage"))
 	}
 	backup.PrintAll(ctx, bups)
@@ -506,7 +506,7 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error {
 // runDetailsSharePointCmd actually performs the lookup in backup details.
 // the fault.Errors return is always non-nil. Callers should check if
-// errs.Err() == nil.
+// errs.Failure() == nil.
 func runDetailsSharePointCmd(
 	ctx context.Context,
 	r repository.BackupGetter,
@@ -519,12 +519,12 @@ func runDetailsSharePointCmd(
 	d, _, errs := r.BackupDetails(ctx, backupID)
 	// TODO: log/track recoverable errors
-	if errs.Err() != nil {
-		if errors.Is(errs.Err(), data.ErrNotFound) {
+	if errs.Failure() != nil {
+		if errors.Is(errs.Failure(), data.ErrNotFound) {
 			return nil, errors.Errorf("no backup exists with the id %s", backupID)
 		}
-		return nil, errors.Wrap(errs.Err(), "Failed to get backup details in the repository")
+		return nil, errors.Wrap(errs.Failure(), "Failed to get backup details in the repository")
 	}
 	sel := utils.IncludeSharePointRestoreDataSelectors(opts)

View File

@@ -112,8 +112,8 @@ func (suite *RestoreExchangeIntegrationSuite) SetupSuite() {
 		require.NoError(t, err, "retrieving recent backup by ID")
 		_, _, errs := suite.repo.BackupDetails(ctx, string(bop.Results.BackupID))
-		require.NoError(t, errs.Err(), "retrieving recent backup details by ID")
-		require.Empty(t, errs.Errs(), "retrieving recent backup details by ID")
+		require.NoError(t, errs.Failure(), "retrieving recent backup details by ID")
+		require.Empty(t, errs.Recovered(), "retrieving recent backup details by ID")
 	}
 }

View File

@@ -501,7 +501,7 @@ func (MockBackupGetter) Backup(
 func (MockBackupGetter) Backups(
 	context.Context,
 	[]model.StableID,
-) ([]*backup.Backup, *fault.Errors) {
+) ([]*backup.Backup, *fault.Bus) {
 	return nil, fault.New(false).Fail(errors.New("unexpected call to mock"))
 }
@@ -515,7 +515,7 @@ func (MockBackupGetter) BackupsByTag(
 func (bg *MockBackupGetter) BackupDetails(
 	ctx context.Context,
 	backupID string,
-) (*details.Details, *backup.Backup, *fault.Errors) {
+) (*details.Details, *backup.Backup, *fault.Bus) {
 	if bg == nil {
 		return testdata.GetDetailsSet(), nil, fault.New(true)
 	}

View File

@@ -53,7 +53,7 @@ func generateAndRestoreItems(
 	howMany int,
 	dbf dataBuilderFunc,
 	opts control.Options,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*details.Details, error) {
 	items := make([]item, 0, howMany)

View File

@@ -79,7 +79,7 @@ func handleExchangeEmailFactory(cmd *cobra.Command, args []string) error {
 	}
 	log := logger.Ctx(ctx)
-	for _, e := range errs.Errs() {
+	for _, e := range errs.Recovered() {
 		log.Errorw(e.Error(), clues.InErr(err).Slice()...)
 	}
@@ -126,7 +126,7 @@ func handleExchangeCalendarEventFactory(cmd *cobra.Command, args []string) error
 	}
 	log := logger.Ctx(ctx)
-	for _, e := range errs.Errs() {
+	for _, e := range errs.Recovered() {
 		log.Errorw(e.Error(), clues.InErr(err).Slice()...)
 	}
@@ -178,7 +178,7 @@ func handleExchangeContactFactory(cmd *cobra.Command, args []string) error {
 	}
 	log := logger.Ctx(ctx)
-	for _, e := range errs.Errs() {
+	for _, e := range errs.Recovered() {
 		log.Errorw(e.Error(), clues.InErr(err).Slice()...)
 	}

View File

@@ -93,7 +93,7 @@ func runDisplayM365JSON(
 	ctx context.Context,
 	creds account.M365Config,
 	user, itemID string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) error {
 	var (
 		bs []byte
@@ -143,7 +143,7 @@ type itemer interface {
 	GetItem(
 		ctx context.Context,
 		user, itemID string,
-		errs *fault.Errors,
+		errs *fault.Bus,
 	) (serialization.Parsable, *details.ExchangeInfo, error)
 	Serialize(
 		ctx context.Context,
@@ -156,7 +156,7 @@ func getItem(
 	ctx context.Context,
 	itm itemer,
 	user, itemID string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]byte, error) {
 	sp, _, err := itm.GetItem(ctx, user, itemID, errs)
 	if err != nil {

View File

@@ -157,7 +157,7 @@ func purgeOneDriveFolders(
 		return nil, err
 	}
-	cfs, err := onedrive.GetAllFolders(ctx, gs, pager, prefix)
+	cfs, err := onedrive.GetAllFolders(ctx, gs, pager, prefix, fault.New(true))
 	if err != nil {
 		return nil, err
 	}

View File

@@ -39,7 +39,7 @@ func (gc *GraphConnector) DataCollections(
 	sels selectors.Selector,
 	metadata []data.RestoreCollection,
 	ctrlOpts control.Options,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]data.BackupCollection, map[string]struct{}, error) {
 	ctx, end := D.Span(ctx, "gc:dataCollections", D.Index("service", sels.Service.String()))
 	defer end()
@@ -91,7 +91,7 @@ func (gc *GraphConnector) DataCollections(
 		return colls, excludes, nil
 	case selectors.ServiceOneDrive:
-		return gc.OneDriveDataCollections(ctx, sels, metadata, ctrlOpts)
+		return gc.OneDriveDataCollections(ctx, sels, metadata, ctrlOpts, errs)
 	case selectors.ServiceSharePoint:
 		colls, excludes, err := sharepoint.DataCollections(
@@ -193,6 +193,7 @@ func (gc *GraphConnector) OneDriveDataCollections(
 	selector selectors.Selector,
 	metadata []data.RestoreCollection,
 	ctrlOpts control.Options,
+	errs *fault.Bus,
 ) ([]data.BackupCollection, map[string]struct{}, error) {
 	odb, err := selector.ToOneDriveBackup()
 	if err != nil {
@@ -218,7 +219,7 @@ func (gc *GraphConnector) OneDriveDataCollections(
 		gc.Service,
 		gc.UpdateStatus,
 		ctrlOpts,
-	).Get(ctx, metadata)
+	).Get(ctx, metadata, errs)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -249,7 +250,7 @@ func (gc *GraphConnector) RestoreDataCollections(
 	dest control.RestoreDestination,
 	opts control.Options,
 	dcs []data.RestoreCollection,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*details.Details, error) {
 	ctx, end := D.Span(ctx, "connector:restore")
 	defer end()
@@ -268,7 +269,7 @@ func (gc *GraphConnector) RestoreDataCollections(
 	case selectors.ServiceExchange:
 		status, err = exchange.RestoreExchangeDataCollections(ctx, creds, gc.Service, dest, dcs, deets, errs)
 	case selectors.ServiceOneDrive:
-		status, err = onedrive.RestoreCollections(ctx, backupVersion, gc.Service, dest, opts, dcs, deets)
+		status, err = onedrive.RestoreCollections(ctx, backupVersion, gc.Service, dest, opts, dcs, deets, errs)
 	case selectors.ServiceSharePoint:
 		status, err = sharepoint.RestoreCollections(ctx, backupVersion, creds, gc.Service, dest, dcs, deets, errs)
 	default:

View File

@@ -86,7 +86,7 @@ func userOptions(fs *string) *users.UsersRequestBuilderGetRequestConfiguration {
 }
 // GetAll retrieves all users.
-func (c Users) GetAll(ctx context.Context, errs *fault.Errors) ([]models.Userable, error) {
+func (c Users) GetAll(ctx context.Context, errs *fault.Bus) ([]models.Userable, error) {
 	service, err := c.service()
 	if err != nil {
 		return nil, err
@@ -108,16 +108,19 @@ func (c Users) GetAll(ctx context.Context, errs *fault.Errors) ([]models.Userabl
 		return nil, clues.Wrap(err, "creating users iterator").WithClues(ctx).With(graph.ErrData(err)...)
 	}
-	us := make([]models.Userable, 0)
+	var (
+		us = make([]models.Userable, 0)
+		el = errs.Local()
+	)
 	iterator := func(item any) bool {
-		if errs.Err() != nil {
+		if el.Failure() != nil {
 			return false
 		}
 		u, err := validateUser(item)
 		if err != nil {
-			errs.Add(clues.Wrap(err, "validating user").WithClues(ctx).With(graph.ErrData(err)...))
+			el.AddRecoverable(clues.Wrap(err, "validating user").WithClues(ctx).With(graph.ErrData(err)...))
 		} else {
 			us = append(us, u)
 		}
@@ -129,7 +132,7 @@ func (c Users) GetAll(ctx context.Context, errs *fault.Errors) ([]models.Userabl
 		return nil, clues.Wrap(err, "iterating all users").WithClues(ctx).With(graph.ErrData(err)...)
 	}
-	return us, errs.Err()
+	return us, el.Failure()
 }
 func (c Users) GetByID(ctx context.Context, userID string) (models.Userable, error) {

View File

@@ -15,7 +15,7 @@ import (
 // ---------------------------------------------------------------------------
 type getAller interface {
-	GetAll(context.Context, *fault.Errors) ([]models.Userable, error)
+	GetAll(context.Context, *fault.Bus) ([]models.Userable, error)
 }
 type getter interface {
@@ -36,7 +36,7 @@ type getWithInfoer interface {
 // ---------------------------------------------------------------------------
 // Users fetches all users in the tenant.
-func Users(ctx context.Context, ga getAller, errs *fault.Errors) ([]models.Userable, error) {
+func Users(ctx context.Context, ga getAller, errs *fault.Bus) ([]models.Userable, error) {
 	return ga.GetAll(ctx, errs)
 }

View File

@@ -72,7 +72,7 @@ func (c Contacts) DeleteContainer(
 func (c Contacts) GetItem(
 	ctx context.Context,
 	user, itemID string,
-	_ *fault.Errors, // no attachments to iterate over, so this goes unused
+	_ *fault.Bus, // no attachments to iterate over, so this goes unused
 ) (serialization.Parsable, *details.ExchangeInfo, error) {
 	cont, err := c.stable.Client().UsersById(user).ContactsById(itemID).Get(ctx, nil)
 	if err != nil {
@@ -109,7 +109,7 @@ func (c Contacts) EnumerateContainers(
 	ctx context.Context,
 	userID, baseDirID string,
 	fn func(graph.CacheFolder) error,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) error {
 	service, err := c.service()
 	if err != nil {
@@ -138,12 +138,12 @@ func (c Contacts) EnumerateContainers(
 		}
 		for _, fold := range resp.GetValue() {
-			if errs.Err() != nil {
-				return errs.Err()
+			if errs.Failure() != nil {
+				return errs.Failure()
 			}
 			if err := checkIDAndName(fold); err != nil {
-				errs.Add(clues.Stack(err).WithClues(ctx).With(graph.ErrData(err)...))
+				errs.AddRecoverable(clues.Stack(err).WithClues(ctx).With(graph.ErrData(err)...))
 				continue
 			}
@@ -154,7 +154,7 @@ func (c Contacts) EnumerateContainers(
 			temp := graph.NewCacheFolder(fold, nil, nil)
 			if err := fn(temp); err != nil {
-				errs.Add(clues.Stack(err).WithClues(fctx).With(graph.ErrData(err)...))
+				errs.AddRecoverable(clues.Stack(err).WithClues(fctx).With(graph.ErrData(err)...))
 				continue
 			}
 		}
@@ -167,7 +167,7 @@ func (c Contacts) EnumerateContainers(
 		builder = users.NewItemContactFoldersItemChildFoldersRequestBuilder(link, service.Adapter())
 	}
-	return errs.Err()
+	return errs.Failure()
 }
 // ---------------------------------------------------------------------------

View File

@@ -96,7 +96,7 @@ func (c Events) GetContainerByID(
 func (c Events) GetItem(
 	ctx context.Context,
 	user, itemID string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (serialization.Parsable, *details.ExchangeInfo, error) {
 	var (
 		err error
@@ -141,7 +141,7 @@ func (c Events) EnumerateContainers(
 	ctx context.Context,
 	userID, baseDirID string,
 	fn func(graph.CacheFolder) error,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) error {
 	service, err := c.service()
 	if err != nil {
@@ -164,7 +164,7 @@ func (c Events) EnumerateContainers(
 	for _, cal := range resp.GetValue() {
 		cd := CalendarDisplayable{Calendarable: cal}
 		if err := checkIDAndName(cd); err != nil {
-			errs.Add(clues.Stack(err).WithClues(ctx).With(graph.ErrData(err)...))
+			errs.AddRecoverable(clues.Stack(err).WithClues(ctx).With(graph.ErrData(err)...))
 			continue
 		}
@@ -178,7 +178,7 @@ func (c Events) EnumerateContainers(
 			path.Builder{}.Append(ptr.Val(cd.GetId())),          // storage path
 			path.Builder{}.Append(ptr.Val(cd.GetDisplayName()))) // display location
 		if err := fn(temp); err != nil {
-			errs.Add(clues.Stack(err).WithClues(fctx).With(graph.ErrData(err)...))
+			errs.AddRecoverable(clues.Stack(err).WithClues(fctx).With(graph.ErrData(err)...))
 			continue
 		}
 	}
@@ -191,7 +191,7 @@ func (c Events) EnumerateContainers(
 		builder = users.NewItemCalendarsRequestBuilder(link, service.Adapter())
 	}
-	return errs.Err()
+	return errs.Failure()
 }
 // ---------------------------------------------------------------------------

View File

@@ -124,7 +124,7 @@ func (c Mail) GetContainerByID(
 func (c Mail) GetItem(
 	ctx context.Context,
 	user, itemID string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (serialization.Parsable, *details.ExchangeInfo, error) {
 	mail, err := c.stable.Client().UsersById(user).MessagesById(itemID).Get(ctx, nil)
 	if err != nil {
@@ -164,7 +164,7 @@ func (c Mail) EnumerateContainers(
 	ctx context.Context,
 	userID, baseDirID string,
 	fn func(graph.CacheFolder) error,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) error {
 	service, err := c.service()
 	if err != nil {
@@ -190,7 +190,7 @@ func (c Mail) EnumerateContainers(
 		temp := graph.NewCacheFolder(v, nil, nil)
 		if err := fn(temp); err != nil {
-			errs.Add(clues.Stack(err).WithClues(fctx).With(graph.ErrData(err)...))
+			errs.AddRecoverable(clues.Stack(err).WithClues(fctx).With(graph.ErrData(err)...))
 			continue
 		}
 	}
@@ -203,7 +203,7 @@ func (c Mail) EnumerateContainers(
 		builder = users.NewItemMailFoldersDeltaRequestBuilder(link, service.Adapter())
 	}
-	return errs.Err()
+	return errs.Failure()
 }
 // ---------------------------------------------------------------------------

View File

@@ -47,7 +47,7 @@ func (cfc *contactFolderCache) populateContactRoot(
 // as of (Oct-07-2022)
 func (cfc *contactFolderCache) Populate(
 	ctx context.Context,
-	errs *fault.Errors,
+	errs *fault.Bus,
 	baseID string,
 	baseContainerPather ...string,
 ) error {

View File

@@ -28,7 +28,7 @@ type containersEnumerator interface {
 		ctx context.Context,
 		userID, baseDirID string,
 		fn func(graph.CacheFolder) error,
-		errs *fault.Errors,
+		errs *fault.Bus,
 	) error
 }

View File

@@ -64,7 +64,7 @@ type DeltaPath struct {
 func parseMetadataCollections(
 	ctx context.Context,
 	colls []data.RestoreCollection,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (CatDeltaPaths, error) {
 	// cdp stores metadata
 	cdp := CatDeltaPaths{
@@ -168,7 +168,7 @@ func DataCollections(
 	acct account.M365Config,
 	su support.StatusUpdater,
 	ctrlOpts control.Options,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]data.BackupCollection, map[string]struct{}, error) {
 	eb, err := selector.ToExchangeBackup()
 	if err != nil {
@@ -178,6 +178,7 @@ func DataCollections(
 	var (
 		user        = selector.DiscreteOwner
 		collections = []data.BackupCollection{}
+		el          = errs.Local()
 	)
 	cdps, err := parseMetadataCollections(ctx, metadata, errs)
@@ -186,7 +187,7 @@ func DataCollections(
 	}
 	for _, scope := range eb.Scopes() {
-		if errs.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}
@@ -200,14 +201,14 @@ func DataCollections(
 			su,
 			errs)
 		if err != nil {
-			errs.Add(err)
+			el.AddRecoverable(err)
 			continue
 		}
 		collections = append(collections, dcs...)
 	}
-	return collections, nil, errs.Err()
+	return collections, nil, el.Failure()
 }
 func getterByType(ac api.Client, category path.CategoryType) (addedAndRemovedItemIDsGetter, error) {
@@ -234,7 +235,7 @@ func createCollections(
 	dps DeltaPaths,
 	ctrlOpts control.Options,
 	su support.StatusUpdater,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]data.BackupCollection, error) {
 	var (
 		allCollections = make([]data.BackupCollection, 0)

View File

@@ -62,7 +62,7 @@ func (ecc *eventCalendarCache) populateEventRoot(ctx context.Context) error {
 // @param baseID: ignored. Present to conform to interface
 func (ecc *eventCalendarCache) Populate(
 	ctx context.Context,
-	errs *fault.Errors,
+	errs *fault.Bus,
 	baseID string,
 	baseContainerPath ...string,
 ) error {

View File

@@ -45,7 +45,7 @@ type itemer interface {
 	GetItem(
 		ctx context.Context,
 		user, itemID string,
-		errs *fault.Errors,
+		errs *fault.Bus,
 	) (serialization.Parsable, *details.ExchangeInfo, error)
 	Serialize(
 		ctx context.Context,
@@ -127,7 +127,7 @@ func NewCollection(
 // Items utility function to asynchronously execute process to fill data channel with
 // M365 exchange objects and returns the data channel
-func (col *Collection) Items(ctx context.Context, errs *fault.Errors) <-chan data.Stream {
+func (col *Collection) Items(ctx context.Context, errs *fault.Bus) <-chan data.Stream {
 	go col.streamItems(ctx, errs)
 	return col.data
 }
@@ -163,7 +163,7 @@ func (col Collection) DoNotMergeItems() bool {
 // streamItems is a utility function that uses col.collectionType to be able to serialize
 // all the M365IDs defined in the added field. data channel is closed by this function
-func (col *Collection) streamItems(ctx context.Context, errs *fault.Errors) {
+func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
 	var (
 		success    int64
 		totalBytes int64
@@ -177,7 +177,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Errors) {
 	)
 	defer func() {
-		col.finishPopulation(ctx, int(success), totalBytes, errs.Err())
+		col.finishPopulation(ctx, int(success), totalBytes, errs.Failure())
 	}()
 	if len(col.added)+len(col.removed) > 0 {
@@ -226,7 +226,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Errors) {
 	// add any new items
 	for id := range col.added {
-		if errs.Err() != nil {
+		if errs.Failure() != nil {
 			break
 		}
@@ -253,7 +253,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Errors) {
 				atomic.AddInt64(&success, 1)
 				log.With("err", err).Infow("item not found", clues.InErr(err).Slice()...)
 			} else {
-				errs.Add(clues.Wrap(err, "fetching item"))
+				errs.AddRecoverable(clues.Wrap(err, "fetching item"))
 			}
 			return
@@ -261,7 +261,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Errors) {
 			data, err := col.items.Serialize(ctx, item, user, id)
 			if err != nil {
-				errs.Add(clues.Wrap(err, "serializing item"))
+				errs.AddRecoverable(clues.Wrap(err, "serializing item"))
 				return
 			}
@@ -291,7 +291,7 @@ func getItemWithRetries(
 	ctx context.Context,
 	userID, itemID string,
 	items itemer,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (serialization.Parsable, *details.ExchangeInfo, error) {
 	item, info, err := items.GetItem(ctx, userID, itemID, errs)
 	if err != nil {

View File

@@ -30,7 +30,7 @@ type mockItemer struct {
 func (mi *mockItemer) GetItem(
 	context.Context,
 	string, string,
-	*fault.Errors,
+	*fault.Bus,
 ) (serialization.Parsable, *details.ExchangeInfo, error) {
 	mi.getCount++
 	return nil, nil, mi.getErr

View File

@@ -72,7 +72,7 @@ func (mc *mailFolderCache) populateMailRoot(ctx context.Context) error {
 // for the base container in the cache.
 func (mc *mailFolderCache) Populate(
 	ctx context.Context,
-	errs *fault.Errors,
+	errs *fault.Bus,
 	baseID string,
 	baseContainerPath ...string,
 ) error {

View File

@@ -36,7 +36,7 @@ func createService(credentials account.M365Config) (*graph.Service, error) {
 func PopulateExchangeContainerResolver(
 	ctx context.Context,
 	qp graph.QueryParams,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (graph.ContainerResolver, error) {
 	var (
 		res graph.ContainerResolver

View File

@@ -39,7 +39,7 @@ func filterContainersAndFillCollections(
 	scope selectors.ExchangeScope,
 	dps DeltaPaths,
 	ctrlOpts control.Options,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) error {
 	var (
 		// folder ID -> delta url or folder path lookups
@@ -68,9 +68,11 @@ func filterContainersAndFillCollections(
 		return err
 	}
+	el := errs.Local()
 	for _, c := range resolver.Items() {
-		if errs.Err() != nil {
-			return errs.Err()
+		if el.Failure() != nil {
+			return el.Failure()
 		}
 		cID := *c.GetId()
@@ -100,7 +102,7 @@ func filterContainersAndFillCollections(
 		added, removed, newDelta, err := getter.GetAddedAndRemovedItemIDs(ctx, qp.ResourceOwner, cID, prevDelta)
 		if err != nil {
 			if !graph.IsErrDeletedInFlight(err) {
-				errs.Add(err)
+				el.AddRecoverable(err)
 				continue
 			}
@@ -155,8 +157,12 @@ func filterContainersAndFillCollections(
 	// in the `previousPath` set, but does not exist in the current container
 	// resolver (which contains all the resource owners' current containers).
 	for id, p := range tombstones {
+		if el.Failure() != nil {
+			return el.Failure()
+		}
 		if collections[id] != nil {
-			errs.Add(clues.Wrap(err, "conflict: tombstone exists for a live collection").WithClues(ctx))
+			el.AddRecoverable(clues.Wrap(err, "conflict: tombstone exists for a live collection").WithClues(ctx))
 			continue
 		}
@@ -205,15 +211,14 @@ func filterContainersAndFillCollections(
 		path.ExchangeService,
 		qp.Category,
 		entries,
-		statusUpdater,
-	)
+		statusUpdater)
 	if err != nil {
 		return clues.Wrap(err, "making metadata collection")
 	}
 	collections["metadata"] = col
-	return errs.Err()
+	return el.Failure()
 }
 // produces a set of id:path pairs from the deltapaths map.

View File

@@ -91,8 +91,8 @@ func (m mockResolver) DestinationNameToID(dest string) string { return m.added[d
 func (m mockResolver) IDToPath(context.Context, string, bool) (*path.Builder, *path.Builder, error) {
 	return nil, nil, nil
 }
 func (m mockResolver) PathInCache(string) (string, bool) { return "", false }
-func (m mockResolver) Populate(context.Context, *fault.Errors, string, ...string) error { return nil }
+func (m mockResolver) Populate(context.Context, *fault.Bus, string, ...string) error { return nil }
 // ---------------------------------------------------------------------------
 // tests

View File

@@ -37,7 +37,7 @@ func RestoreExchangeObject(
 	policy control.CollisionPolicy,
 	service graph.Servicer,
 	destination, user string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*details.ExchangeInfo, error) {
 	if policy != control.Copy {
 		return nil, clues.Wrap(clues.New(policy.String()), "policy not supported for Exchange restore").WithClues(ctx)
@@ -102,7 +102,7 @@ func RestoreExchangeEvent(
 	service graph.Servicer,
 	cp control.CollisionPolicy,
 	destination, user string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*details.ExchangeInfo, error) {
 	event, err := support.CreateEventFromBytes(bits)
 	if err != nil {
@@ -112,6 +112,7 @@ func RestoreExchangeEvent(
 	ctx = clues.Add(ctx, "item_id", ptr.Val(event.GetId()))
 	var (
+		el               = errs.Local()
 		transformedEvent = support.ToEventSimplified(event)
 		attached         []models.Attachmentable
 	)
@@ -139,19 +140,19 @@ func RestoreExchangeEvent(
 	}
 	for _, attach := range attached {
-		if errs.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}
 		if err := uploadAttachment(ctx, uploader, attach); err != nil {
-			errs.Add(err)
+			el.AddRecoverable(err)
 		}
 	}
 	info := api.EventInfo(event)
 	info.Size = int64(len(bits))
-	return info, errs.Err()
+	return info, el.Failure()
 }
 // RestoreMailMessage utility function to place an exchange.Mail
@@ -166,7 +167,7 @@ func RestoreMailMessage(
 	service graph.Servicer,
 	cp control.CollisionPolicy,
 	destination, user string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*details.ExchangeInfo, error) {
 	// Creates messageable object from original bytes
 	originalMessage, err := support.CreateMessageFromBytes(bits)
@@ -239,7 +240,7 @@ func SendMailToBackStore(
 	service graph.Servicer,
 	user, destination string,
 	message models.Messageable,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) error {
 	attached := message.GetAttachments()
@@ -255,17 +256,19 @@ func SendMailToBackStore(
 		return clues.New("nil response from post").WithClues(ctx)
 	}
-	id := ptr.Val(response.GetId())
-
-	uploader := &mailAttachmentUploader{
-		userID:   user,
-		folderID: destination,
-		itemID:   id,
-		service:  service,
-	}
+	var (
+		el       = errs.Local()
+		id       = ptr.Val(response.GetId())
+		uploader = &mailAttachmentUploader{
+			userID:   user,
+			folderID: destination,
+			itemID:   id,
+			service:  service,
+		}
+	)
 	for _, attachment := range attached {
-		if errs.Err() != nil {
+		if el.Failure() != nil {
 			break
 		}
@@ -280,13 +283,13 @@ func SendMailToBackStore(
 				continue
 			}
-			errs.Add(errors.Wrap(err, "uploading mail attachment"))
+			el.AddRecoverable(errors.Wrap(err, "uploading mail attachment"))
 			break
 		}
 	}
-	return errs.Err()
+	return el.Failure()
 }
 // RestoreExchangeDataCollections restores M365 objects in data.RestoreCollection to MSFT
@@ -299,7 +302,7 @@ func RestoreExchangeDataCollections(
 	dest control.RestoreDestination,
 	dcs []data.RestoreCollection,
 	deets *details.Builder,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*support.ConnectorOperationStatus, error) {
 	var (
 		directoryCaches = make(map[string]map[path.CategoryType]graph.ContainerResolver)
@@ -307,6 +310,7 @@ func RestoreExchangeDataCollections(
 		userID string
 		// TODO policy to be updated from external source after completion of refactoring
 		policy = control.Copy
+		el     = errs.Local()
 	)
 	if len(dcs) > 0 {
@@ -315,8 +319,8 @@ func RestoreExchangeDataCollections(
 	}
 	for _, dc := range dcs {
-		if errs.Err() != nil {
-			return nil, errs.Err()
+		if el.Failure() != nil {
+			break
 		}
 		userCaches := directoryCaches[userID]
@@ -333,7 +337,7 @@ func RestoreExchangeDataCollections(
 			userCaches,
 			errs)
 		if err != nil {
-			errs.Add(clues.Wrap(err, "creating destination").WithClues(ctx))
+			el.AddRecoverable(clues.Wrap(err, "creating destination").WithClues(ctx))
 			continue
 		}
@@ -351,10 +355,10 @@ func RestoreExchangeDataCollections(
 		support.Restore,
 		len(dcs),
 		metrics,
-		errs.Err(),
+		el.Failure(),
 		dest.ContainerName)
-	return status, errs.Err()
+	return status, el.Failure()
 }
 // restoreCollection handles restoration of an individual collection.
@@ -365,7 +369,7 @@ func restoreCollection(
 	folderID string,
 	policy control.CollisionPolicy,
 	deets *details.Builder,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (support.CollectionMetrics, bool) {
 	ctx, end := D.Span(ctx, "gc:exchange:restoreCollection", D.Label("path", dc.FullPath()))
 	defer end()
@@ -396,11 +400,11 @@ func restoreCollection(
 	for {
 		select {
 		case <-ctx.Done():
-			errs.Add(clues.Wrap(ctx.Err(), "context cancelled").WithClues(ctx))
+			errs.AddRecoverable(clues.Wrap(ctx.Err(), "context cancelled").WithClues(ctx))
 			return metrics, true
 		case itemData, ok := <-items:
-			if !ok || errs.Err() != nil {
+			if !ok || errs.Failure() != nil {
 				return metrics, false
 			}
@@ -412,7 +416,7 @@ func restoreCollection(
 			_, err := buf.ReadFrom(itemData.ToReader())
 			if err != nil {
-				errs.Add(clues.Wrap(err, "reading item bytes").WithClues(ictx))
+				errs.AddRecoverable(clues.Wrap(err, "reading item bytes").WithClues(ictx))
 				continue
 			}
@@ -428,7 +432,7 @@ func restoreCollection(
 				user,
 				errs)
 			if err != nil {
-				errs.Add(err)
+				errs.AddRecoverable(err)
 				continue
 			}
@@ -437,7 +441,7 @@ func restoreCollection(
 			itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
 			if err != nil {
-				errs.Add(clues.Wrap(err, "building full path with item").WithClues(ctx))
+				errs.AddRecoverable(clues.Wrap(err, "building full path with item").WithClues(ctx))
 				continue
 			}
@@ -472,7 +476,7 @@ func CreateContainerDestination(
 	directory path.Path,
 	destination string,
 	caches map[path.CategoryType]graph.ContainerResolver,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (string, error) {
 	var (
 		newCache = false
@@ -585,7 +589,7 @@ func establishMailRestoreLocation(
 	mfc graph.ContainerResolver,
 	user string,
 	isNewCache bool,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (string, error) {
 	// Process starts with the root folder in order to recreate
 	// the top-level folder with the same tactic
@@ -644,7 +648,7 @@ func establishContactsRestoreLocation(
 	cfc graph.ContainerResolver,
 	user string,
 	isNewCache bool,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (string, error) {
 	cached, ok := cfc.PathInCache(folders[0])
 	if ok {
@@ -680,7 +684,7 @@ func establishEventsRestoreLocation(
 	ecc graph.ContainerResolver, // eventCalendarCache
 	user string,
 	isNewCache bool,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (string, error) {
 	// Need to prefix with the "Other Calendars" folder so lookup happens properly.
 	cached, ok := ecc.PathInCache(folders[0])

View File

@@ -65,7 +65,7 @@ type ContainerResolver interface {
 	// @param ctx is necessary param for Graph API tracing
 	// @param baseFolderID represents the M365ID base that the resolver will
 	// conclude its search. Default input is "".
-	Populate(ctx context.Context, errs *fault.Errors, baseFolderID string, baseContainerPather ...string) error
+	Populate(ctx context.Context, errs *fault.Bus, baseFolderID string, baseContainerPather ...string) error
 	// PathInCache performs a look up of a path reprensentation
 	// and returns the m365ID of directory iff the pathString

View File

@@ -134,7 +134,7 @@ func (md MetadataCollection) DoNotMergeItems() bool {
 func (md MetadataCollection) Items(
 	ctx context.Context,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) <-chan data.Stream {
 	res := make(chan data.Stream)

View File

@@ -0,0 +1,25 @@
+// Code generated by "stringer -type=Operation"; DO NOT EDIT.
+
+package status
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[OpUnknown-0]
+	_ = x[Backup-1]
+	_ = x[Restore-2]
+}
+
+const _Operation_name = "OpUnknownBackupRestore"
+
+var _Operation_index = [...]uint8{0, 9, 15, 22}
+
+func (i Operation) String() string {
+	if i < 0 || i >= Operation(len(_Operation_index)-1) {
+		return "Operation(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Operation_name[_Operation_index[i]:_Operation_index[i+1]]
+}

View File

@@ -0,0 +1,104 @@
+package status
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/dustin/go-humanize"
+)
+
+type Operation int
+
+//go:generate stringer -type=Operation
+const (
+	OpUnknown Operation = iota
+	Backup
+	Restore
+)
+
+// ConnectorStatus is a data type used to describe the state of the sequence of operations.
+type ConnectorStatus struct {
+	Metrics    Counts
+	details    string
+	incomplete bool
+	op         Operation
+}
+
+type Counts struct {
+	Bytes                       int64
+	Folders, Objects, Successes int
+}
+
+func CombineCounts(a, b Counts) Counts {
+	return Counts{
+		Bytes:     a.Bytes + b.Bytes,
+		Folders:   a.Folders + b.Folders,
+		Objects:   a.Objects + b.Objects,
+		Successes: a.Successes + b.Successes,
+	}
+}
+
+// New constructs a ConnectorStatus from the given operation, counts, and details.
+func New(
+	ctx context.Context,
+	op Operation,
+	cs Counts,
+	details string,
+	incomplete bool,
+) ConnectorStatus {
+	status := ConnectorStatus{
+		Metrics:    cs,
+		details:    details,
+		incomplete: incomplete,
+		op:         op,
+	}
+
+	return status
+}
+
+// Combine aggregates both ConnectorStatus values into a single status.
+func Combine(one, two ConnectorStatus) ConnectorStatus {
+	if one.op == OpUnknown {
+		return two
+	}
+
+	if two.op == OpUnknown {
+		return one
+	}
+
+	status := ConnectorStatus{
+		Metrics:    CombineCounts(one.Metrics, two.Metrics),
+		details:    one.details + ", " + two.details,
+		incomplete: one.incomplete || two.incomplete,
+		op:         one.op,
+	}
+
+	return status
+}
+
+func (cos ConnectorStatus) String() string {
+	var operationStatement string
+
+	switch cos.op {
+	case Backup:
+		operationStatement = "Downloaded from "
+	case Restore:
+		operationStatement = "Restored content to "
+	}
+
+	var incomplete string
+	if cos.incomplete {
+		incomplete = "Incomplete "
+	}
+
+	message := fmt.Sprintf("%sAction: %s performed on %d of %d objects (%s) within %d directories.",
+		incomplete,
+		cos.op.String(),
+		cos.Metrics.Successes,
+		cos.Metrics.Objects,
+		humanize.Bytes(uint64(cos.Metrics.Bytes)),
+		cos.Metrics.Folders)
+
+	return message + " " + operationStatement + cos.details
+}
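
Since Combine treats OpUnknown as an identity value, an empty ConnectorStatus can seed a fold over any number of per-category statuses. A small usage sketch, written as if it sat in the status package itself and exercising only the API shown above:

	// ExampleCombine folds two per-category statuses into one report.
	func ExampleCombine() {
		ctx := context.Background()

		mail := New(ctx, Backup, Counts{Bytes: 2048, Folders: 3, Objects: 10, Successes: 9}, "mail", false)
		events := New(ctx, Backup, Counts{Bytes: 512, Folders: 1, Objects: 4, Successes: 4}, "events", true)

		// The zero-value seed is absorbed; counts accumulate and
		// incompleteness is sticky across the fold.
		total := Combine(Combine(ConnectorStatus{}, mail), events)
		fmt.Println(total.Metrics.Successes, total.incomplete)
		// Output: 13 true
	}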

View File

@@ -0,0 +1,91 @@
+package status
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/alcionai/corso/src/internal/tester"
+)
+
+type StatusUnitSuite struct {
+	tester.Suite
+}
+
+func TestGraphConnectorStatus(t *testing.T) {
+	suite.Run(t, &StatusUnitSuite{tester.NewUnitSuite(t)})
+}
+
+func (suite *StatusUnitSuite) TestNew() {
+	ctx, flush := tester.NewContext()
+	defer flush()
+
+	result := New(
+		ctx,
+		Backup,
+		Counts{1, 1, 1, 1},
+		"details",
+		true)
+	assert.True(suite.T(), result.incomplete, "status is incomplete")
+}
+
+func (suite *StatusUnitSuite) TestMergeStatus() {
+	ctx, flush := tester.NewContext()
+	defer flush()
+
+	table := []struct {
+		name         string
+		one          ConnectorStatus
+		two          ConnectorStatus
+		expectOP     Operation
+		expected     Counts
+		isIncomplete assert.BoolAssertionFunc
+	}{
+		{
+			name:         "Test: Status + unknown",
+			one:          New(ctx, Backup, Counts{1, 1, 1, 1}, "details", false),
+			two:          ConnectorStatus{},
+			expectOP:     Backup,
+			expected:     Counts{1, 1, 1, 1},
+			isIncomplete: assert.False,
+		},
+		{
+			name:         "Test: unknown + Status",
+			one:          ConnectorStatus{},
+			two:          New(ctx, Backup, Counts{1, 1, 1, 1}, "details", false),
+			expectOP:     Backup,
+			expected:     Counts{1, 1, 1, 1},
+			isIncomplete: assert.False,
+		},
+		{
+			name:         "Test: complete + complete",
+			one:          New(ctx, Backup, Counts{1, 1, 3, 0}, "details", false),
+			two:          New(ctx, Backup, Counts{3, 3, 3, 0}, "details", false),
+			expectOP:     Backup,
+			expected:     Counts{4, 4, 6, 0},
+			isIncomplete: assert.False,
+		},
+		{
+			name:         "Test: complete + incomplete",
+			one:          New(ctx, Restore, Counts{17, 17, 13, 0}, "details", false),
+			two:          New(ctx, Restore, Counts{12, 9, 8, 0}, "details", true),
+			expectOP:     Restore,
+			expected:     Counts{29, 26, 21, 0},
+			isIncomplete: assert.True,
+		},
+	}
+
+	for _, test := range table {
+		suite.Run(test.name, func() {
+			t := suite.T()
+			result := Combine(test.one, test.two)
+			assert.Equal(t, result.op, test.expectOP)
+			assert.Equal(t, test.expected.Folders, result.Metrics.Folders)
+			assert.Equal(t, test.expected.Objects, result.Metrics.Objects)
+			assert.Equal(t, test.expected.Successes, result.Metrics.Successes)
+			assert.Equal(t, test.expected.Bytes, result.Metrics.Bytes)
+			test.isIncomplete(t, result.incomplete)
+		})
+	}
+}

View File

@@ -68,7 +68,7 @@ func NewGraphConnector(
 	itemClient *http.Client,
 	acct account.Account,
 	r resource,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (*GraphConnector, error) {
 	m365, err := acct.M365Config()
 	if err != nil {
@@ -129,7 +129,7 @@ func (gc *GraphConnector) createService() (*graph.Service, error) {
 // setTenantUsers queries the M365 to identify the users in the
 // workspace. The users field is updated during this method
 // iff the returned error is nil
-func (gc *GraphConnector) setTenantUsers(ctx context.Context, errs *fault.Errors) error {
+func (gc *GraphConnector) setTenantUsers(ctx context.Context, errs *fault.Bus) error {
 	ctx, end := D.Span(ctx, "gc:setTenantUsers")
 	defer end()
@@ -160,7 +160,7 @@ func (gc *GraphConnector) GetUsersIds() []string {
 // setTenantSites queries the M365 to identify the sites in the
 // workspace. The sites field is updated during this method
 // iff the returned error is nil.
-func (gc *GraphConnector) setTenantSites(ctx context.Context, errs *fault.Errors) error {
+func (gc *GraphConnector) setTenantSites(ctx context.Context, errs *fault.Bus) error {
 	gc.Sites = map[string]string{}
 	ctx, end := D.Span(ctx, "gc:setTenantSites")
@@ -232,7 +232,7 @@ func (gc *GraphConnector) GetSiteIDs() []string {
 func (gc *GraphConnector) UnionSiteIDsAndWebURLs(
 	ctx context.Context,
 	ids, urls []string,
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) ([]string, error) {
 	if len(gc.Sites) == 0 {
 		if err := gc.setTenantSites(ctx, errs); err != nil {
@@ -314,7 +314,7 @@ func getResources(
 	query func(context.Context, graph.Servicer) (serialization.Parsable, error),
 	parser func(parseNode serialization.ParseNode) (serialization.Parsable, error),
 	identify func(any) (string, string, error),
-	errs *fault.Errors,
+	errs *fault.Bus,
 ) (map[string]string, error) {
 	resources := map[string]string{}
@@ -330,15 +330,17 @@ func getResources(
 		return nil, clues.Stack(err).WithClues(ctx).With(graph.ErrData(err)...)
 	}
+	el := errs.Local()
+
 	callbackFunc := func(item any) bool {
-		if errs.Err() != nil {
+		if el.Failure() != nil {
 			return false
 		}
 		k, v, err := identify(item)
 		if err != nil {
 			if !errors.Is(err, errKnownSkippableCase) {
-				errs.Add(clues.Stack(err).
+				el.AddRecoverable(clues.Stack(err).
 					WithClues(ctx).
 					With("query_url", gs.Adapter().GetBaseUrl()))
 			}
@@ -355,5 +357,5 @@ func getResources(
 		return nil, clues.Stack(err).WithClues(ctx).With(graph.ErrData(err)...)
 	}
-	return resources, errs.Err()
+	return resources, el.Failure()
 }

View File

@@ -111,7 +111,7 @@ func (medc MockExchangeDataCollection) DoNotMergeItems() bool { return med
 // channel is closed when there are no more items available.
 func (medc *MockExchangeDataCollection) Items(
 	ctx context.Context,
-	_ *fault.Errors, // unused
+	_ *fault.Bus, // unused
 ) <-chan data.Stream {
 	res := make(chan data.Stream)

View File

@@ -48,7 +48,7 @@ func (mlc *MockListCollection) PreviousPath() path.Path {
 func (mlc *MockListCollection) Items(
 	ctx context.Context,
-	_ *fault.Errors, // unused
+	_ *fault.Bus, // unused
 ) <-chan data.Stream {
 	res := make(chan data.Stream)

View File

@ -2,12 +2,13 @@ package api
import ( import (
"context" "context"
"fmt"
"github.com/alcionai/clues"
msdrives "github.com/microsoftgraph/msgraph-sdk-go/drives" msdrives "github.com/microsoftgraph/msgraph-sdk-go/drives"
"github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/microsoftgraph/msgraph-sdk-go/models"
mssites "github.com/microsoftgraph/msgraph-sdk-go/sites" mssites "github.com/microsoftgraph/msgraph-sdk-go/sites"
msusers "github.com/microsoftgraph/msgraph-sdk-go/users" msusers "github.com/microsoftgraph/msgraph-sdk-go/users"
"github.com/pkg/errors"
"github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/graph/api" "github.com/alcionai/corso/src/internal/connector/graph/api"
@ -16,10 +17,7 @@ import (
func getValues[T any](l api.PageLinker) ([]T, error) { func getValues[T any](l api.PageLinker) ([]T, error) {
page, ok := l.(interface{ GetValue() []T }) page, ok := l.(interface{ GetValue() []T })
if !ok { if !ok {
return nil, errors.Errorf( return nil, clues.New("page does not comply with GetValue() interface").With("page_item_type", fmt.Sprintf("%T", l))
"response of type [%T] does not comply with GetValue() interface",
l,
)
} }
return page.GetValue(), nil return page.GetValue(), nil
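The rewrite keeps getValues as the single point where SDK page responses are narrowed to typed values. A usage sketch under that assumption (i.e. that the drive-listing response exposes GetValue() []models.Driveable, as the pagers in this file expect):

	// Sketch: narrowing a generic page response to typed values.
	func drivesIn(page api.PageLinker) ([]models.Driveable, error) {
		vals, err := getValues[models.Driveable](page)
		if err != nil {
			// the concrete page type lacks GetValue() []models.Driveable
			return nil, err
		}

		return vals, nil
	}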
@ -69,8 +67,11 @@ func (p *driveItemPager) GetPage(ctx context.Context) (api.DeltaPageLinker, erro
) )
resp, err = p.builder.Get(ctx, p.options) resp, err = p.builder.Get(ctx, p.options)
if err != nil {
return nil, clues.Stack(err).WithClues(ctx).With(graph.ErrData(err)...)
}
return resp, err return resp, nil
} }
func (p *driveItemPager) SetNext(link string) { func (p *driveItemPager) SetNext(link string) {
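Both pagers now normalize SDK failures identically. Isolated as a helper, the idiom looks roughly like this (a sketch; per its use throughout the diff, graph.ErrData extracts Graph-specific key/value annotations from the error):

	// Sketch: the error-decoration idiom this PR standardizes on.
	// clues.Stack preserves the original error chain, WithClues attaches
	// the values accumulated on ctx, and graph.ErrData contributes
	// Graph-specific details.
	func stackWithGraphData(ctx context.Context, err error) error {
		if err == nil {
			return nil
		}

		return clues.Stack(err).WithClues(ctx).With(graph.ErrData(err)...)
	}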
@ -163,8 +164,11 @@ func (p *siteDrivePager) GetPage(ctx context.Context) (api.PageLinker, error) {
) )
resp, err = p.builder.Get(ctx, p.options) resp, err = p.builder.Get(ctx, p.options)
if err != nil {
return nil, clues.Stack(err).WithClues(ctx).With(graph.ErrData(err)...)
}
return resp, err return resp, nil
} }
func (p *siteDrivePager) SetNext(link string) { func (p *siteDrivePager) SetNext(link string) {

View File

@ -10,10 +10,12 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/alcionai/clues"
"github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/spatialcurrent/go-lazy/pkg/lazy" "github.com/spatialcurrent/go-lazy/pkg/lazy"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
@ -82,7 +84,7 @@ type Collection struct {
type itemReaderFunc func( type itemReaderFunc func(
hc *http.Client, hc *http.Client,
item models.DriveItemable, item models.DriveItemable,
) (itemInfo details.ItemInfo, itemData io.ReadCloser, err error) ) (details.ItemInfo, io.ReadCloser, error)
// itemMetaReaderFunc returns a reader for the metadata of the // itemMetaReaderFunc returns a reader for the metadata of the
// specified item // specified item
@ -141,9 +143,9 @@ func (oc *Collection) Add(item models.DriveItemable) {
// Items() returns the channel containing M365 Exchange objects // Items() returns the channel containing M365 Exchange objects
func (oc *Collection) Items( func (oc *Collection) Items(
ctx context.Context, ctx context.Context,
errs *fault.Errors, // TODO: currently unused while onedrive isn't up to date with clues/fault errs *fault.Bus,
) <-chan data.Stream { ) <-chan data.Stream {
go oc.populateItems(ctx) go oc.populateItems(ctx, errs)
return oc.data return oc.data
} }
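With the bus threaded through Items, a consumer drains the channel and then asks the bus whether anything failed. A consumption sketch under the signatures shown here:

	// Sketch: draining a collection's item stream. The channel closes
	// when population finishes; per-item problems land on the bus
	// instead of terminating the stream.
	func drain(ctx context.Context, col data.Collection, errs *fault.Bus) (int, error) {
		var count int

		for item := range col.Items(ctx, errs) {
			_ = item // hypothetical per-item handling
			count++
		}

		return count, errs.Failure()
	}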
@ -216,23 +218,22 @@ func (od *Item) ModTime() time.Time {
// populateItems iterates through items added to the collection // populateItems iterates through items added to the collection
// and uses the collection `itemReader` to read the item // and uses the collection `itemReader` to read the item
func (oc *Collection) populateItems(ctx context.Context) { func (oc *Collection) populateItems(ctx context.Context, errs *fault.Bus) {
var ( var (
errs error
byteCount int64 byteCount int64
itemsRead int64 itemsRead int64
dirsRead int64 dirsRead int64
itemsFound int64 itemsFound int64
dirsFound int64 dirsFound int64
wg sync.WaitGroup wg sync.WaitGroup
m sync.Mutex el = errs.Local()
) )
// Retrieve the OneDrive folder path to set later in // Retrieve the OneDrive folder path to set later in
// `details.OneDriveInfo` // `details.OneDriveInfo`
parentPathString, err := path.GetDriveFolderPath(oc.folderPath) parentPathString, err := path.GetDriveFolderPath(oc.folderPath)
if err != nil { if err != nil {
oc.reportAsCompleted(ctx, 0, 0, 0, err) oc.reportAsCompleted(ctx, 0, 0, 0, clues.Wrap(err, "getting drive path").WithClues(ctx))
return return
} }
@ -247,14 +248,8 @@ func (oc *Collection) populateItems(ctx context.Context) {
semaphoreCh := make(chan struct{}, urlPrefetchChannelBufferSize) semaphoreCh := make(chan struct{}, urlPrefetchChannelBufferSize)
defer close(semaphoreCh) defer close(semaphoreCh)
errUpdater := func(id string, err error) {
m.Lock()
errs = support.WrapAndAppend(id, err, errs)
m.Unlock()
}
for _, item := range oc.driveItems { for _, item := range oc.driveItems {
if oc.ctrl.FailFast && errs != nil { if el.Failure() != nil {
break break
} }
@ -262,22 +257,27 @@ func (oc *Collection) populateItems(ctx context.Context) {
wg.Add(1) wg.Add(1)
go func(item models.DriveItemable) { go func(ctx context.Context, item models.DriveItemable) {
defer wg.Done() defer wg.Done()
defer func() { <-semaphoreCh }() defer func() { <-semaphoreCh }()
// Read the item // Read the item
var ( var (
itemID = *item.GetId() itemID = ptr.Val(item.GetId())
itemName = *item.GetName() itemName = ptr.Val(item.GetName())
itemSize = *item.GetSize() itemSize = ptr.Val(item.GetSize())
itemInfo details.ItemInfo itemInfo details.ItemInfo
itemMeta io.ReadCloser itemMeta io.ReadCloser
itemMetaSize int itemMetaSize int
metaSuffix string metaSuffix string
err error
) )
ctx = clues.Add(ctx,
"restore_item_id", itemID,
"restore_item_name", itemName,
"restore_item_size", itemSize,
"restore_item_info", itemInfo)
isFile := item.GetFile() != nil isFile := item.GetFile() != nil
if isFile { if isFile {
@ -301,9 +301,8 @@ func (oc *Collection) populateItems(ctx context.Context) {
itemMetaSize = 2 itemMetaSize = 2
} else { } else {
itemMeta, itemMetaSize, err = oc.itemMetaReader(ctx, oc.service, oc.driveID, item) itemMeta, itemMetaSize, err = oc.itemMetaReader(ctx, oc.service, oc.driveID, item)
if err != nil { if err != nil {
errUpdater(*item.GetId(), errors.Wrap(err, "failed to get item permissions")) el.AddRecoverable(clues.Wrap(err, "getting item permissions"))
return return
} }
} }
@ -351,7 +350,7 @@ func (oc *Collection) populateItems(ctx context.Context) {
// check for errors following retries // check for errors following retries
if err != nil { if err != nil {
errUpdater(itemID, err) el.AddRecoverable(clues.Stack(err).WithClues(ctx))
return nil, err return nil, err
} }
@ -361,8 +360,7 @@ func (oc *Collection) populateItems(ctx context.Context) {
itemData, itemData,
observe.ItemBackupMsg, observe.ItemBackupMsg,
observe.PII(itemName+dataSuffix), observe.PII(itemName+dataSuffix),
itemSize, itemSize)
)
go closer() go closer()
return progReader, nil return progReader, nil
@ -419,15 +417,15 @@ func (oc *Collection) populateItems(ctx context.Context) {
atomic.AddInt64(&byteCount, itemSize) atomic.AddInt64(&byteCount, itemSize)
folderProgress <- struct{}{} folderProgress <- struct{}{}
}(item) }(ctx, item)
} }
wg.Wait() wg.Wait()
oc.reportAsCompleted(ctx, int(itemsFound), int(itemsRead), byteCount, errs) oc.reportAsCompleted(ctx, int(itemsFound), int(itemsRead), byteCount, el.Failure())
} }
func (oc *Collection) reportAsCompleted(ctx context.Context, itemsFound, itemsRead int, byteCount int64, errs error) { func (oc *Collection) reportAsCompleted(ctx context.Context, itemsFound, itemsRead int, byteCount int64, err error) {
close(oc.data) close(oc.data)
status := support.CreateStatus(ctx, support.Backup, status := support.CreateStatus(ctx, support.Backup,
@ -437,7 +435,7 @@ func (oc *Collection) reportAsCompleted(ctx context.Context, itemsFound, itemsRe
Successes: itemsRead, // items read successfully, Successes: itemsRead, // items read successfully,
TotalBytes: byteCount, // Number of bytes read in the operation, TotalBytes: byteCount, // Number of bytes read in the operation,
}, },
errs, err,
oc.folderPath.Folder(false), // Additional details oc.folderPath.Folder(false), // Additional details
) )
logger.Ctx(ctx).Debugw("done streaming items", "status", status.String()) logger.Ctx(ctx).Debugw("done streaming items", "status", status.String())
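This file also swaps raw dereferences like *item.GetId() for ptr.Val, which tolerates the nil pointers Graph models routinely hand back. A minimal sketch of such a helper (the real internal/common/ptr package may differ in detail):

	// Sketch: nil-safe pointer dereference returning the zero value.
	func Val[T any](p *T) T {
		if p == nil {
			var zero T
			return zero
		}

		return *p
	}

So itemID := ptr.Val(item.GetId()) yields "" rather than panicking when the SDK returns a nil ID.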

View File

@ -13,11 +13,13 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"golang.org/x/exp/maps" "golang.org/x/exp/maps"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/observe" "github.com/alcionai/corso/src/internal/observe"
"github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/path"
) )
@ -117,32 +119,40 @@ func NewCollections(
func deserializeMetadata( func deserializeMetadata(
ctx context.Context, ctx context.Context,
cols []data.RestoreCollection, cols []data.RestoreCollection,
errs *fault.Bus,
) (map[string]string, map[string]map[string]string, error) { ) (map[string]string, map[string]map[string]string, error) {
logger.Ctx(ctx).Infow( logger.Ctx(ctx).Infow(
"deserialzing previous backup metadata", "deserialzing previous backup metadata",
"num_collections", "num_collections", len(cols))
len(cols),
var (
prevDeltas = map[string]string{}
prevFolders = map[string]map[string]string{}
el = errs.Local()
) )
prevDeltas := map[string]string{}
prevFolders := map[string]map[string]string{}
for _, col := range cols { for _, col := range cols {
items := col.Items(ctx, nil) // TODO: fault.Errors instead of nil if el.Failure() != nil {
break
}
items := col.Items(ctx, errs)
for breakLoop := false; !breakLoop; { for breakLoop := false; !breakLoop; {
select { select {
case <-ctx.Done(): case <-ctx.Done():
return nil, nil, errors.Wrap(ctx.Err(), "deserializing previous backup metadata") return nil, nil, clues.Wrap(ctx.Err(), "deserializing previous backup metadata").WithClues(ctx)
case item, ok := <-items: case item, ok := <-items:
if !ok { if !ok {
// End of collection items.
breakLoop = true breakLoop = true
break break
} }
var err error var (
err error
ictx = clues.Add(ctx, "item_uuid", item.UUID())
)
switch item.UUID() { switch item.UUID() {
case graph.PreviousPathFileName: case graph.PreviousPathFileName:
@ -152,11 +162,9 @@ func deserializeMetadata(
err = deserializeMap(item.ToReader(), prevDeltas) err = deserializeMap(item.ToReader(), prevDeltas)
default: default:
logger.Ctx(ctx).Infow( logger.Ctx(ictx).Infow(
"skipping unknown metadata file", "skipping unknown metadata file",
"file_name", "file_name", item.UUID())
item.UUID(),
)
continue continue
} }
@ -173,20 +181,15 @@ func deserializeMetadata(
// we end up in a situation where we're sourcing items from the wrong // we end up in a situation where we're sourcing items from the wrong
// base in kopia wrapper. // base in kopia wrapper.
if errors.Is(err, errExistingMapping) { if errors.Is(err, errExistingMapping) {
return nil, nil, errors.Wrapf( return nil, nil, clues.Wrap(err, "deserializing metadata file").WithClues(ictx)
err,
"deserializing metadata file %s",
item.UUID(),
)
} }
logger.Ctx(ctx).Errorw( err = clues.Stack(err).WithClues(ictx)
"deserializing base backup metadata. Falling back to full backup for selected drives",
"error", el.AddRecoverable(err)
err, logger.Ctx(ictx).
"file_name", With("err", err).
item.UUID(), Errorw("deserializing base backup metadata", clues.InErr(err).Slice()...)
)
} }
} }
@ -213,10 +216,10 @@ func deserializeMetadata(
} }
} }
return prevDeltas, prevFolders, nil return prevDeltas, prevFolders, el.Failure()
} }
var errExistingMapping = errors.New("mapping already exists for same drive ID") var errExistingMapping = clues.New("mapping already exists for same drive ID")
// deserializeMap takes a reader and a map of already deserialized items and // deserializeMap takes a reader and a map of already deserialized items and
// adds the newly deserialized items to alreadyFound. Items are only added to // adds the newly deserialized items to alreadyFound. Items are only added to
@ -242,7 +245,7 @@ func deserializeMap[T any](reader io.ReadCloser, alreadyFound map[string]T) erro
} }
if duplicate { if duplicate {
return errors.WithStack(errExistingMapping) return clues.Stack(errExistingMapping)
} }
maps.Copy(alreadyFound, tmp) maps.Copy(alreadyFound, tmp)
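Only deserializeMap's tail is visible in this hunk; its shape, inferred from the surrounding comments and the maps.Copy merge, is roughly the following sketch (everything beyond the visible fragments is an assumption):

	// Sketch: decode into a temporary map, reject drive IDs that were
	// already deserialized, then merge into the accumulator.
	func deserializeMap[T any](reader io.ReadCloser, alreadyFound map[string]T) error {
		defer reader.Close()

		tmp := map[string]T{}
		if err := json.NewDecoder(reader).Decode(&tmp); err != nil {
			return clues.Wrap(err, "deserializing file contents")
		}

		var duplicate bool

		for k := range tmp {
			if _, ok := alreadyFound[k]; ok {
				duplicate = true
				break
			}
		}

		if duplicate {
			return clues.Stack(errExistingMapping)
		}

		maps.Copy(alreadyFound, tmp)

		return nil
	}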
@ -255,8 +258,9 @@ func deserializeMap[T any](reader io.ReadCloser, alreadyFound map[string]T) erro
func (c *Collections) Get( func (c *Collections) Get(
ctx context.Context, ctx context.Context,
prevMetadata []data.RestoreCollection, prevMetadata []data.RestoreCollection,
errs *fault.Bus,
) ([]data.BackupCollection, map[string]struct{}, error) { ) ([]data.BackupCollection, map[string]struct{}, error) {
prevDeltas, oldPathsByDriveID, err := deserializeMetadata(ctx, prevMetadata) prevDeltas, oldPathsByDriveID, err := deserializeMetadata(ctx, prevMetadata, errs)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -264,7 +268,7 @@ func (c *Collections) Get(
// Enumerate drives for the specified resourceOwner // Enumerate drives for the specified resourceOwner
pager, err := c.drivePagerFunc(c.source, c.service, c.resourceOwner, nil) pager, err := c.drivePagerFunc(c.source, c.service, c.resourceOwner, nil)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, clues.Stack(err).WithClues(ctx).With(graph.ErrData(err)...)
} }
retry := c.source == OneDriveSource retry := c.source == OneDriveSource
@ -287,39 +291,33 @@ func (c *Collections) Get(
excludedItems = map[string]struct{}{} excludedItems = map[string]struct{}{}
) )
// Update the collection map with items from each drive
for _, d := range drives { for _, d := range drives {
driveID := *d.GetId() var (
driveName := *d.GetName() driveID = ptr.Val(d.GetId())
driveName = ptr.Val(d.GetName())
prevDelta = prevDeltas[driveID]
oldPaths = oldPathsByDriveID[driveID]
numOldDelta = 0
)
prevDelta := prevDeltas[driveID]
oldPaths := oldPathsByDriveID[driveID]
numOldDelta := 0
if len(prevDelta) > 0 { if len(prevDelta) > 0 {
numOldDelta++ numOldDelta++
} }
logger.Ctx(ctx).Infow( logger.Ctx(ctx).Infow(
"previous metadata for drive", "previous metadata for drive",
"num_paths_entries", "num_paths_entries", len(oldPaths),
len(oldPaths), "num_deltas_entries", numOldDelta)
"num_deltas_entries",
numOldDelta)
delta, paths, excluded, err := collectItems( delta, paths, excluded, err := collectItems(
ctx, ctx,
c.itemPagerFunc( c.itemPagerFunc(c.service, driveID, ""),
c.service,
driveID,
"",
),
driveID, driveID,
driveName, driveName,
c.UpdateCollections, c.UpdateCollections,
oldPaths, oldPaths,
prevDelta, prevDelta,
) errs)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -343,7 +341,6 @@ func (c *Collections) Get(
// token because it thinks the folder paths weren't persisted. // token because it thinks the folder paths weren't persisted.
folderPaths[driveID] = map[string]string{} folderPaths[driveID] = map[string]string{}
maps.Copy(folderPaths[driveID], paths) maps.Copy(folderPaths[driveID], paths)
maps.Copy(excludedItems, excluded) maps.Copy(excludedItems, excluded)
logger.Ctx(ctx).Infow( logger.Ctx(ctx).Infow(
@ -372,18 +369,15 @@ func (c *Collections) Get(
graph.NewMetadataEntry(graph.PreviousPathFileName, folderPaths), graph.NewMetadataEntry(graph.PreviousPathFileName, folderPaths),
graph.NewMetadataEntry(graph.DeltaURLsFileName, deltaURLs), graph.NewMetadataEntry(graph.DeltaURLsFileName, deltaURLs),
}, },
c.statusUpdater, c.statusUpdater)
)
if err != nil { if err != nil {
// Technically it's safe to continue here because the logic for starting an // Technically it's safe to continue here because the logic for starting an
// incremental backup should eventually find that the metadata files are // incremental backup should eventually find that the metadata files are
// empty/missing and default to a full backup. // empty/missing and default to a full backup.
logger.Ctx(ctx).Warnw( logger.Ctx(ctx).
"making metadata collection for future incremental backups", With("err", err).
"error", Infow("making metadata collection for future incremental backups", clues.InErr(err).Slice()...)
err,
)
} else { } else {
collections = append(collections, metadata) collections = append(collections, metadata)
} }
@ -453,8 +447,15 @@ func (c *Collections) UpdateCollections(
newPaths map[string]string, newPaths map[string]string,
excluded map[string]struct{}, excluded map[string]struct{},
invalidPrevDelta bool, invalidPrevDelta bool,
errs *fault.Bus,
) error { ) error {
el := errs.Local()
for _, item := range items { for _, item := range items {
if el.Failure() != nil {
break
}
var ( var (
prevPath path.Path prevPath path.Path
prevCollectionPath path.Path prevCollectionPath path.Path
@ -480,25 +481,30 @@ func (c *Collections) UpdateCollections(
continue continue
} }
var (
itemID = ptr.Val(item.GetId())
ictx = clues.Add(ctx, "update_item_id", itemID)
)
if item.GetParentReference() == nil || if item.GetParentReference() == nil ||
item.GetParentReference().GetId() == nil || item.GetParentReference().GetId() == nil ||
(item.GetDeleted() == nil && item.GetParentReference().GetPath() == nil) { (item.GetDeleted() == nil && item.GetParentReference().GetPath() == nil) {
err := clues.New("no parent reference").With("item_id", *item.GetId()) el.AddRecoverable(clues.New("item missing parent reference").
if item.GetName() != nil { WithClues(ictx).
err = err.With("item_name", *item.GetName()) With("item_id", itemID, "item_name", ptr.Val(item.GetName())))
}
return err continue
} }
// Create a collection for the parent of this item // Create a collection for the parent of this item
collectionID := *item.GetParentReference().GetId() collectionID := ptr.Val(item.GetParentReference().GetId())
ictx = clues.Add(ictx, "collection_id", collectionID)
var collectionPathStr string var collectionPathStr string
if item.GetDeleted() == nil { if item.GetDeleted() == nil {
collectionPathStr = *item.GetParentReference().GetPath() collectionPathStr = ptr.Val(item.GetParentReference().GetPath())
} else { } else {
collectionPathStr, ok = oldPaths[*item.GetParentReference().GetId()] collectionPathStr, ok = oldPaths[ptr.Val(item.GetParentReference().GetId())]
if !ok { if !ok {
// This collection was created and destroyed in // This collection was created and destroyed in
// between the current and previous invocation // between the current and previous invocation
@ -510,25 +516,26 @@ func (c *Collections) UpdateCollections(
collectionPathStr, collectionPathStr,
c.tenant, c.tenant,
c.resourceOwner, c.resourceOwner,
c.source, c.source)
)
if err != nil { if err != nil {
return err return clues.Stack(err).WithClues(ictx)
} }
// Skip items that don't match the folder selectors we were given. // Skip items that don't match the folder selectors we were given.
if shouldSkipDrive(ctx, collectionPath, c.matcher, driveName) { if shouldSkipDrive(ictx, collectionPath, c.matcher, driveName) {
logger.Ctx(ctx).Infof("Skipping path %s", collectionPath.String()) logger.Ctx(ictx).Infow("Skipping path", "skipped_path", collectionPath.String())
continue continue
} }
switch { switch {
case item.GetFolder() != nil, item.GetPackage() != nil: case item.GetFolder() != nil, item.GetPackage() != nil:
prevPathStr, ok := oldPaths[*item.GetId()] prevPathStr, ok := oldPaths[itemID]
if ok { if ok {
prevPath, err = path.FromDataLayerPath(prevPathStr, false) prevPath, err = path.FromDataLayerPath(prevPathStr, false)
if err != nil { if err != nil {
return clues.Wrap(err, "invalid previous path").With("path_string", prevPathStr) el.AddRecoverable(clues.Wrap(err, "invalid previous path").
WithClues(ictx).
With("path_string", prevPathStr))
} }
} }
@ -536,7 +543,7 @@ func (c *Collections) UpdateCollections(
// Nested folders also return deleted delta results so we don't have to // Nested folders also return deleted delta results so we don't have to
// worry about doing a prefix search in the map to remove the subtree of // worry about doing a prefix search in the map to remove the subtree of
// the deleted folder/package. // the deleted folder/package.
delete(newPaths, *item.GetId()) delete(newPaths, itemID)
if prevPath == nil { if prevPath == nil {
// It is possible that an item was created and // It is possible that an item was created and
@ -555,10 +562,9 @@ func (c *Collections) UpdateCollections(
c.statusUpdater, c.statusUpdater,
c.source, c.source,
c.ctrl, c.ctrl,
invalidPrevDelta, invalidPrevDelta)
)
c.CollectionMap[*item.GetId()] = col c.CollectionMap[itemID] = col
break break
} }
@ -568,14 +574,16 @@ func (c *Collections) UpdateCollections(
// parentRef or such. // parentRef or such.
folderPath, err := collectionPath.Append(*item.GetName(), false) folderPath, err := collectionPath.Append(*item.GetName(), false)
if err != nil { if err != nil {
logger.Ctx(ctx).Errorw("failed building collection path", "error", err) logger.Ctx(ictx).Errorw("building collection path", "error", err)
return err el.AddRecoverable(clues.Stack(err).WithClues(ictx))
continue
} }
// Moved folders don't cause delta results for any subfolders nested in // Moved folders don't cause delta results for any subfolders nested in
// them. We need to go through and update paths to handle that. We only // them. We need to go through and update paths to handle that. We only
// update newPaths so we don't accidentally clobber previous deletes. // update newPaths so we don't accidentally clobber previous deletes.
updatePath(newPaths, *item.GetId(), folderPath.String()) updatePath(newPaths, itemID, folderPath.String())
found, err := updateCollectionPaths(*item.GetId(), c.CollectionMap, folderPath) found, err := updateCollectionPaths(*item.GetId(), c.CollectionMap, folderPath)
if err != nil { if err != nil {
@ -598,7 +606,7 @@ func (c *Collections) UpdateCollections(
c.ctrl, c.ctrl,
invalidPrevDelta, invalidPrevDelta,
) )
c.CollectionMap[*item.GetId()] = col c.CollectionMap[itemID] = col
c.NumContainers++ c.NumContainers++
} }
} }
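As the comment above notes, a moved folder produces no delta entries for the subfolders nested beneath it, so every stored path sharing the old prefix has to be rewritten. updatePath itself isn't shown in this diff; a plausible sketch of that behavior:

	// Sketch: rewrite a moved folder's stored path and the paths of
	// everything recorded beneath it. A real implementation would also
	// guard against sibling-name prefix collisions (e.g. /a/b vs /a/bc).
	func updatePath(paths map[string]string, id, newPath string) {
		oldPath := paths[id]
		if len(oldPath) == 0 || oldPath == newPath {
			paths[id] = newPath
			return
		}

		for k, p := range paths {
			if strings.HasPrefix(p, oldPath) {
				paths[k] = strings.Replace(p, oldPath, newPath, 1)
			}
		}
	}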
@ -615,7 +623,7 @@ func (c *Collections) UpdateCollections(
// deleted, we want to avoid it. If it was // deleted, we want to avoid it. If it was
// renamed/moved/modified, we still have to drop the // renamed/moved/modified, we still have to drop the
// original one and download a fresh copy. // original one and download a fresh copy.
excluded[*item.GetId()] = struct{}{} excluded[itemID] = struct{}{}
} }
if item.GetDeleted() != nil { if item.GetDeleted() != nil {
@ -679,11 +687,11 @@ func (c *Collections) UpdateCollections(
} }
default: default:
return errors.Errorf("item type not supported. item name : %s", *item.GetName()) return clues.New("item type not supported").WithClues(ctx)
} }
} }
return nil return el.Failure()
} }
func shouldSkipDrive(ctx context.Context, drivePath path.Path, m folderMatcher, driveName string) bool { func shouldSkipDrive(ctx context.Context, drivePath path.Path, m folderMatcher, driveName string) bool {
@ -705,7 +713,7 @@ func GetCanonicalPath(p, tenant, resourceOwner string, source driveSource) (path
case SharePointSource: case SharePointSource:
result, err = pathBuilder.ToDataLayerSharePointPath(tenant, resourceOwner, path.LibrariesCategory, false) result, err = pathBuilder.ToDataLayerSharePointPath(tenant, resourceOwner, path.LibrariesCategory, false)
default: default:
return nil, errors.Errorf("unrecognized drive data source") return nil, clues.New("unrecognized data source")
} }
if err != nil { if err != nil {
@ -719,7 +727,7 @@ func includePath(ctx context.Context, m folderMatcher, folderPath path.Path) boo
// Check if the folder is allowed by the scope. // Check if the folder is allowed by the scope.
folderPathString, err := path.GetDriveFolderPath(folderPath) folderPathString, err := path.GetDriveFolderPath(folderPath)
if err != nil { if err != nil {
logger.Ctx(ctx).Error(err) logger.Ctx(ctx).With("err", err).Error("getting drive folder path")
return true return true
} }

View File

@ -20,6 +20,7 @@ import (
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/selectors"
) )
@ -702,7 +703,7 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() {
outputFolderMap, outputFolderMap,
excludes, excludes,
false, false,
) fault.New(true))
tt.expect(t, err) tt.expect(t, err)
assert.Equal(t, len(tt.expectedCollectionIDs), len(c.CollectionMap), "total collections") assert.Equal(t, len(tt.expectedCollectionIDs), len(c.CollectionMap), "total collections")
assert.Equal(t, tt.expectedItemCount, c.NumItems, "item count") assert.Equal(t, tt.expectedItemCount, c.NumItems, "item count")
@ -1058,7 +1059,7 @@ func (suite *OneDriveCollectionsSuite) TestDeserializeMetadata() {
cols = append(cols, data.NotFoundRestoreCollection{Collection: mc}) cols = append(cols, data.NotFoundRestoreCollection{Collection: mc})
} }
deltas, paths, err := deserializeMetadata(ctx, cols) deltas, paths, err := deserializeMetadata(ctx, cols, fault.New(true))
test.errCheck(t, err) test.errCheck(t, err)
assert.Equal(t, test.expectedDeltas, deltas) assert.Equal(t, test.expectedDeltas, deltas)
@ -1597,7 +1598,7 @@ func (suite *OneDriveCollectionsSuite) TestGet() {
assert.NoError(t, err, "creating metadata collection") assert.NoError(t, err, "creating metadata collection")
prevMetadata := []data.RestoreCollection{data.NotFoundRestoreCollection{Collection: mc}} prevMetadata := []data.RestoreCollection{data.NotFoundRestoreCollection{Collection: mc}}
cols, delList, err := c.Get(ctx, prevMetadata) cols, delList, err := c.Get(ctx, prevMetadata, fault.New(true))
test.errCheck(t, err) test.errCheck(t, err)
if err != nil { if err != nil {
@ -1607,9 +1608,12 @@ func (suite *OneDriveCollectionsSuite) TestGet() {
for _, baseCol := range cols { for _, baseCol := range cols {
folderPath := baseCol.FullPath().String() folderPath := baseCol.FullPath().String()
if folderPath == metadataPath.String() { if folderPath == metadataPath.String() {
deltas, paths, err := deserializeMetadata(ctx, []data.RestoreCollection{ deltas, paths, err := deserializeMetadata(
data.NotFoundRestoreCollection{Collection: baseCol}, ctx,
}) []data.RestoreCollection{
data.NotFoundRestoreCollection{Collection: baseCol},
},
fault.New(true))
if !assert.NoError(t, err, "deserializing metadata") { if !assert.NoError(t, err, "deserializing metadata") {
continue continue
} }
@ -1804,6 +1808,7 @@ func (suite *OneDriveCollectionsSuite) TestCollectItems() {
newPaths map[string]string, newPaths map[string]string,
excluded map[string]struct{}, excluded map[string]struct{},
doNotMergeItems bool, doNotMergeItems bool,
errs *fault.Bus,
) error { ) error {
return nil return nil
} }
@ -1816,7 +1821,7 @@ func (suite *OneDriveCollectionsSuite) TestCollectItems() {
collectorFunc, collectorFunc,
map[string]string{}, map[string]string{},
test.prevDelta, test.prevDelta,
) fault.New(true))
require.ErrorIs(suite.T(), err, test.err, "delta fetch err") require.ErrorIs(suite.T(), err, test.err, "delta fetch err")
require.Equal(suite.T(), test.deltaURL, delta.URL, "delta url") require.Equal(suite.T(), test.deltaURL, delta.URL, "delta url")

View File

@ -6,9 +6,9 @@ import (
"strings" "strings"
"time" "time"
"github.com/alcionai/clues"
msdrive "github.com/microsoftgraph/msgraph-sdk-go/drive" msdrive "github.com/microsoftgraph/msgraph-sdk-go/drive"
"github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
"github.com/pkg/errors" "github.com/pkg/errors"
"golang.org/x/exp/maps" "golang.org/x/exp/maps"
@ -17,10 +17,11 @@ import (
gapi "github.com/alcionai/corso/src/internal/connector/graph/api" gapi "github.com/alcionai/corso/src/internal/connector/graph/api"
"github.com/alcionai/corso/src/internal/connector/onedrive/api" "github.com/alcionai/corso/src/internal/connector/onedrive/api"
"github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/logger"
) )
var errFolderNotFound = errors.New("folder not found") var errFolderNotFound = clues.New("folder not found")
const ( const (
getDrivesRetries = 3 getDrivesRetries = 3
@ -77,8 +78,6 @@ func drives(
retry bool, retry bool,
) ([]models.Driveable, error) { ) ([]models.Driveable, error) {
var ( var (
err error
page gapi.PageLinker
numberOfRetries = getDrivesRetries numberOfRetries = getDrivesRetries
drives = []models.Driveable{} drives = []models.Driveable{}
) )
@ -89,30 +88,31 @@ func drives(
// Loop through all pages returned by Graph API. // Loop through all pages returned by Graph API.
for { for {
var (
err error
page gapi.PageLinker
)
// Retry Loop for Drive retrieval. Request can timeout // Retry Loop for Drive retrieval. Request can timeout
for i := 0; i <= numberOfRetries; i++ { for i := 0; i <= numberOfRetries; i++ {
page, err = pager.GetPage(ctx) page, err = pager.GetPage(ctx)
if err != nil { if err != nil {
// Various error handling. May return an error or perform a retry. // Various error handling. May return an error or perform a retry.
detailedError := support.ConnectorStackErrorTraceWrap(err, "").Error() errMsg := support.ConnectorStackErrorTraceWrap(err, "").Error()
if strings.Contains(detailedError, userMysiteURLNotFound) || if strings.Contains(errMsg, userMysiteURLNotFound) ||
strings.Contains(detailedError, userMysiteURLNotFoundMsg) || strings.Contains(errMsg, userMysiteURLNotFoundMsg) ||
strings.Contains(detailedError, userMysiteNotFound) || strings.Contains(errMsg, userMysiteNotFound) ||
strings.Contains(detailedError, userMysiteNotFoundMsg) { strings.Contains(errMsg, userMysiteNotFoundMsg) {
logger.Ctx(ctx).Infof("resource owner does not have a drive") logger.Ctx(ctx).Infof("resource owner does not have a drive")
return make([]models.Driveable, 0), nil // no license or drives. return make([]models.Driveable, 0), nil // no license or drives.
} }
if strings.Contains(detailedError, contextDeadlineExceeded) && i < numberOfRetries { if strings.Contains(errMsg, contextDeadlineExceeded) && i < numberOfRetries {
time.Sleep(time.Duration(3*(i+1)) * time.Second) time.Sleep(time.Duration(3*(i+1)) * time.Second)
continue continue
} }
return nil, errors.Wrapf( return nil, clues.Wrap(err, "retrieving drives").WithClues(ctx).With(graph.ErrData(err)...)
err,
"failed to retrieve drives. details: %s",
detailedError,
)
} }
// No error encountered, break the retry loop so we can extract results // No error encountered, break the retry loop so we can extract results
@ -122,7 +122,7 @@ func drives(
tmp, err := pager.ValuesIn(page) tmp, err := pager.ValuesIn(page)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "extracting drives from response") return nil, clues.Wrap(err, "extracting drives from response").WithClues(ctx).With(graph.ErrData(err)...)
} }
drives = append(drives, tmp...) drives = append(drives, tmp...)
@ -135,7 +135,7 @@ func drives(
pager.SetNext(nextLink) pager.SetNext(nextLink)
} }
logger.Ctx(ctx).Debugf("Found %d drives", len(drives)) logger.Ctx(ctx).Debugf("retrieved %d valid drives", len(drives))
return drives, nil return drives, nil
} }
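The retry loop above sleeps 3s, 6s, then 9s between attempts when the page fetch hits a context deadline. The same shape as a reusable helper (a sketch; like the code above, it classifies retryable failures by substring match):

	// Sketch: linear-backoff retry mirroring the loop in drives().
	func withRetries[T any](
		ctx context.Context,
		attempts int,
		fn func(context.Context) (T, error),
	) (T, error) {
		var (
			result T
			err    error
		)

		for i := 0; i <= attempts; i++ {
			result, err = fn(ctx)
			if err == nil {
				return result, nil
			}

			if i == attempts || !strings.Contains(err.Error(), contextDeadlineExceeded) {
				break
			}

			time.Sleep(time.Duration(3*(i+1)) * time.Second)
		}

		return result, clues.Stack(err).WithClues(ctx)
	}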
@ -149,6 +149,7 @@ type itemCollector func(
newPaths map[string]string, newPaths map[string]string,
excluded map[string]struct{}, excluded map[string]struct{},
validPrevDelta bool, validPrevDelta bool,
errs *fault.Bus,
) error ) error
type itemPager interface { type itemPager interface {
@ -193,6 +194,7 @@ func collectItems(
collector itemCollector, collector itemCollector,
oldPaths map[string]string, oldPaths map[string]string,
prevDelta string, prevDelta string,
errs *fault.Bus,
) (DeltaUpdate, map[string]string, map[string]struct{}, error) { ) (DeltaUpdate, map[string]string, map[string]struct{}, error) {
var ( var (
newDeltaURL = "" newDeltaURL = ""
@ -220,19 +222,17 @@ func collectItems(
} }
if err != nil { if err != nil {
return DeltaUpdate{}, nil, nil, errors.Wrapf( return DeltaUpdate{}, nil, nil, clues.Wrap(err, "getting page").WithClues(ctx).With(graph.ErrData(err)...)
err,
"failed to query drive items. details: %s",
support.ConnectorStackErrorTrace(err),
)
} }
vals, err := pager.ValuesIn(page) vals, err := pager.ValuesIn(page)
if err != nil { if err != nil {
return DeltaUpdate{}, nil, nil, errors.Wrap(err, "extracting items from response") return DeltaUpdate{}, nil, nil, clues.Wrap(err, "extracting items from response").
WithClues(ctx).
With(graph.ErrData(err)...)
} }
err = collector(ctx, driveID, driveName, vals, oldPaths, newPaths, excluded, invalidPrevDelta) err = collector(ctx, driveID, driveName, vals, oldPaths, newPaths, excluded, invalidPrevDelta, errs)
if err != nil { if err != nil {
return DeltaUpdate{}, nil, nil, err return DeltaUpdate{}, nil, nil, err
} }
@ -277,25 +277,16 @@ func getFolder(
foundItem, err = builder.Get(ctx, nil) foundItem, err = builder.Get(ctx, nil)
if err != nil { if err != nil {
var oDataError *odataerrors.ODataError if graph.IsErrDeletedInFlight(err) {
if errors.As(err, &oDataError) && return nil, clues.Stack(errFolderNotFound, err).WithClues(ctx).With(graph.ErrData(err)...)
oDataError.GetError() != nil &&
oDataError.GetError().GetCode() != nil &&
*oDataError.GetError().GetCode() == itemNotFoundErrorCode {
return nil, errors.WithStack(errFolderNotFound)
} }
return nil, errors.Wrapf(err, return nil, clues.Wrap(err, "getting folder").WithClues(ctx).With(graph.ErrData(err)...)
"failed to get folder %s/%s. details: %s",
parentFolderID,
folderName,
support.ConnectorStackErrorTrace(err),
)
} }
// Check if the item found is a folder, fail the call if not // Check if the item found is a folder, fail the call if not
if foundItem.GetFolder() == nil { if foundItem.GetFolder() == nil {
return nil, errors.WithStack(errFolderNotFound) return nil, clues.Stack(errFolderNotFound).WithClues(ctx)
} }
return foundItem, nil return foundItem, nil
@ -311,16 +302,11 @@ func createItem(
// Graph SDK doesn't yet provide a POST method for `/children` so we set the `rawUrl` ourselves as recommended // Graph SDK doesn't yet provide a POST method for `/children` so we set the `rawUrl` ourselves as recommended
// here: https://github.com/microsoftgraph/msgraph-sdk-go/issues/155#issuecomment-1136254310 // here: https://github.com/microsoftgraph/msgraph-sdk-go/issues/155#issuecomment-1136254310
rawURL := fmt.Sprintf(itemChildrenRawURLFmt, driveID, parentFolderID) rawURL := fmt.Sprintf(itemChildrenRawURLFmt, driveID, parentFolderID)
builder := msdrive.NewItemsRequestBuilder(rawURL, service.Adapter()) builder := msdrive.NewItemsRequestBuilder(rawURL, service.Adapter())
newItem, err := builder.Post(ctx, newItem, nil) newItem, err := builder.Post(ctx, newItem, nil)
if err != nil { if err != nil {
return nil, errors.Wrapf( return nil, clues.Wrap(err, "creating item").WithClues(ctx).With(graph.ErrData(err)...)
err,
"failed to create item. details: %s",
support.ConnectorStackErrorTrace(err),
)
} }
return newItem, nil return newItem, nil
@ -356,65 +342,71 @@ func GetAllFolders(
gs graph.Servicer, gs graph.Servicer,
pager drivePager, pager drivePager,
prefix string, prefix string,
errs *fault.Bus,
) ([]*Displayable, error) { ) ([]*Displayable, error) {
drives, err := drives(ctx, pager, true) drives, err := drives(ctx, pager, true)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "getting OneDrive folders") return nil, errors.Wrap(err, "getting OneDrive folders")
} }
folders := map[string]*Displayable{} var (
folders = map[string]*Displayable{}
el = errs.Local()
)
for _, d := range drives { for _, d := range drives {
_, _, _, err = collectItems( if el.Failure() != nil {
ctx, break
defaultItemPager( }
gs,
*d.GetId(),
"",
),
*d.GetId(),
*d.GetName(),
func(
innerCtx context.Context,
driveID, driveName string,
items []models.DriveItemable,
oldPaths map[string]string,
newPaths map[string]string,
excluded map[string]struct{},
doNotMergeItems bool,
) error {
for _, item := range items {
// Skip the root item.
if item.GetRoot() != nil {
continue
}
// Only selecting folders right now, not packages. var (
if item.GetFolder() == nil { id = ptr.Val(d.GetId())
continue name = ptr.Val(d.GetName())
} )
if item.GetId() == nil || len(*item.GetId()) == 0 { ictx := clues.Add(ctx, "drive_id", id, "drive_name", name) // TODO: pii
logger.Ctx(ctx).Warn("folder without ID") collector := func(
continue innerCtx context.Context,
} driveID, driveName string,
items []models.DriveItemable,
if !strings.HasPrefix(*item.GetName(), prefix) { oldPaths map[string]string,
continue newPaths map[string]string,
} excluded map[string]struct{},
doNotMergeItems bool,
// Add the item instead of the folder because the item has more errs *fault.Bus,
// functionality. ) error {
folders[*item.GetId()] = &Displayable{item} for _, item := range items {
// Skip the root item.
if item.GetRoot() != nil {
continue
} }
return nil // Only selecting folders right now, not packages.
}, if item.GetFolder() == nil {
map[string]string{}, continue
"", }
)
itemID := ptr.Val(item.GetId())
if len(itemID) == 0 {
logger.Ctx(ctx).Info("folder missing ID")
continue
}
if !strings.HasPrefix(ptr.Val(item.GetName()), prefix) {
continue
}
// Add the item instead of the folder because the item has more
// functionality.
folders[itemID] = &Displayable{item}
}
return nil
}
_, _, _, err = collectItems(ictx, defaultItemPager(gs, id, ""), id, name, collector, map[string]string{}, "", errs)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "getting items for drive %s", *d.GetName()) el.AddRecoverable(clues.Wrap(err, "enumerating items in drive"))
} }
} }
@ -424,7 +416,7 @@ func GetAllFolders(
res = append(res, f) res = append(res, f)
} }
return res, nil return res, el.Failure()
} }
func DeleteItem( func DeleteItem(
@ -435,7 +427,10 @@ func DeleteItem(
) error { ) error {
err := gs.Client().DrivesById(driveID).ItemsById(itemID).Delete(ctx, nil) err := gs.Client().DrivesById(driveID).ItemsById(itemID).Delete(ctx, nil)
if err != nil { if err != nil {
return errors.Wrapf(err, "deleting item with ID %s", itemID) return clues.Wrap(err, "deleting item").
WithClues(ctx).
With("item_id", itemID).
With(graph.ErrData(err)...)
} }
return nil return nil

View File

@ -18,6 +18,7 @@ import (
"github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/selectors"
) )
@ -399,7 +400,7 @@ func (suite *OneDriveSuite) TestCreateGetDeleteFolder() {
pager, err := PagerForSource(OneDriveSource, gs, suite.userID, nil) pager, err := PagerForSource(OneDriveSource, gs, suite.userID, nil)
require.NoError(t, err) require.NoError(t, err)
allFolders, err := GetAllFolders(ctx, gs, pager, test.prefix) allFolders, err := GetAllFolders(ctx, gs, pager, test.prefix, fault.New(true))
require.NoError(t, err) require.NoError(t, err)
foundFolderIDs := []string{} foundFolderIDs := []string{}
@ -465,7 +466,7 @@ func (suite *OneDriveSuite) TestOneDriveNewCollections() {
service, service,
service.updateStatus, service.updateStatus,
control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}}, control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}},
).Get(ctx, nil) ).Get(ctx, nil, fault.New(true))
assert.NoError(t, err) assert.NoError(t, err)
// Don't expect excludes as this isn't an incremental backup. // Don't expect excludes as this isn't an incremental backup.
assert.Empty(t, excludes) assert.Empty(t, excludes)

View File

@ -4,17 +4,17 @@ import (
"bytes" "bytes"
"context" "context"
"encoding/json" "encoding/json"
"fmt"
"io" "io"
"net/http" "net/http"
"strings" "strings"
"github.com/alcionai/clues"
msdrives "github.com/microsoftgraph/msgraph-sdk-go/drives" msdrives "github.com/microsoftgraph/msgraph-sdk-go/drives"
"github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/internal/connector/uploadsession" "github.com/alcionai/corso/src/internal/connector/uploadsession"
"github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/internal/version"
"github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/backup/details"
@ -33,7 +33,12 @@ func getDriveItem(
srv graph.Servicer, srv graph.Servicer,
driveID, itemID string, driveID, itemID string,
) (models.DriveItemable, error) { ) (models.DriveItemable, error) {
return srv.Client().DrivesById(driveID).ItemsById(itemID).Get(ctx, nil) di, err := srv.Client().DrivesById(driveID).ItemsById(itemID).Get(ctx, nil)
if err != nil {
return nil, clues.Wrap(err, "getting item").WithClues(ctx).With(graph.ErrData(err)...)
}
return di, nil
} }
// sharePointItemReader will return an io.ReadCloser for the specified item // sharePointItemReader will return an io.ReadCloser for the specified item
@ -69,7 +74,7 @@ func oneDriveItemMetaReader(
metaJSON, err := json.Marshal(meta) metaJSON, err := json.Marshal(meta)
if err != nil { if err != nil {
return nil, 0, err return nil, 0, clues.Wrap(err, "marshalling json").WithClues(ctx)
} }
return io.NopCloser(bytes.NewReader(metaJSON)), len(metaJSON), nil return io.NopCloser(bytes.NewReader(metaJSON)), len(metaJSON), nil
@ -106,12 +111,12 @@ func oneDriveItemReader(
func downloadItem(hc *http.Client, item models.DriveItemable) (*http.Response, error) { func downloadItem(hc *http.Client, item models.DriveItemable) (*http.Response, error) {
url, ok := item.GetAdditionalData()[downloadURLKey].(*string) url, ok := item.GetAdditionalData()[downloadURLKey].(*string)
if !ok { if !ok {
return nil, fmt.Errorf("extracting file url: file %s", *item.GetId()) return nil, clues.New("extracting file url").With("item_id", ptr.Val(item.GetId()))
} }
req, err := http.NewRequest(http.MethodGet, *url, nil) req, err := http.NewRequest(http.MethodGet, *url, nil)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "new request") return nil, clues.Wrap(err, "new request").With(graph.ErrData(err)...)
} }
//nolint:lll //nolint:lll
@ -144,7 +149,7 @@ func downloadItem(hc *http.Client, item models.DriveItemable) (*http.Response, e
return resp, graph.Err503ServiceUnavailable return resp, graph.Err503ServiceUnavailable
} }
return resp, errors.New("non-2xx http response: " + resp.Status) return resp, clues.Wrap(clues.New(resp.Status), "non-2xx http response")
} }
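downloadItem now maps non-2xx statuses to typed or wrapped errors instead of formatted strings. The classification step, isolated (a sketch; the sentinel is the one referenced above):

	// Sketch: translate an HTTP response into an error callers can
	// match with errors.Is, keeping retryable cases distinguishable.
	func classifyResponse(resp *http.Response) error {
		switch {
		case resp.StatusCode/100 == 2:
			return nil
		case resp.StatusCode == http.StatusServiceUnavailable:
			return graph.Err503ServiceUnavailable // worth retrying later
		default:
			return clues.Wrap(clues.New(resp.Status), "non-2xx http response")
		}
	}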
// oneDriveItemInfo will populate a details.OneDriveInfo struct // oneDriveItemInfo will populate a details.OneDriveInfo struct
@ -171,9 +176,9 @@ func oneDriveItemInfo(di models.DriveItemable, itemSize int64) *details.OneDrive
return &details.OneDriveInfo{ return &details.OneDriveInfo{
ItemType: details.OneDriveItem, ItemType: details.OneDriveItem,
ItemName: *di.GetName(), ItemName: ptr.Val(di.GetName()),
Created: *di.GetCreatedDateTime(), Created: ptr.Val(di.GetCreatedDateTime()),
Modified: *di.GetLastModifiedDateTime(), Modified: ptr.Val(di.GetLastModifiedDateTime()),
DriveName: parent, DriveName: parent,
Size: itemSize, Size: itemSize,
Owner: email, Owner: email,
@ -187,11 +192,13 @@ func oneDriveItemMetaInfo(
ctx context.Context, service graph.Servicer, ctx context.Context, service graph.Servicer,
driveID string, di models.DriveItemable, driveID string, di models.DriveItemable,
) (Metadata, error) { ) (Metadata, error) {
itemID := di.GetId() perm, err := service.Client().
DrivesById(driveID).
perm, err := service.Client().DrivesById(driveID).ItemsById(*itemID).Permissions().Get(ctx, nil) ItemsById(ptr.Val(di.GetId())).
Permissions().
Get(ctx, nil)
if err != nil { if err != nil {
return Metadata{}, err return Metadata{}, clues.Wrap(err, "getting item metadata").WithClues(ctx).With(graph.ErrData(err)...)
} }
uperms := filterUserPermissions(perm.GetValue()) uperms := filterUserPermissions(perm.GetValue())
@ -223,7 +230,7 @@ func filterUserPermissions(perms []models.Permissionable) []UserPermission {
} }
up = append(up, UserPermission{ up = append(up, UserPermission{
ID: *p.GetId(), ID: ptr.Val(p.GetId()),
Roles: roles, Roles: roles,
Email: *p.GetGrantedToV2().GetUser().GetAdditionalData()["email"].(*string), Email: *p.GetGrantedToV2().GetUser().GetAdditionalData()["email"].(*string),
Expiration: p.GetExpirationDateTime(), Expiration: p.GetExpirationDateTime(),
@ -275,9 +282,9 @@ func sharePointItemInfo(di models.DriveItemable, itemSize int64) *details.ShareP
return &details.SharePointInfo{ return &details.SharePointInfo{
ItemType: details.OneDriveItem, ItemType: details.OneDriveItem,
ItemName: *di.GetName(), ItemName: ptr.Val(di.GetName()),
Created: *di.GetCreatedDateTime(), Created: ptr.Val(di.GetCreatedDateTime()),
Modified: *di.GetLastModifiedDateTime(), Modified: ptr.Val(di.GetLastModifiedDateTime()),
DriveName: parent, DriveName: parent,
Size: itemSize, Size: itemSize,
Owner: id, Owner: id,
@ -295,20 +302,18 @@ func driveItemWriter(
itemSize int64, itemSize int64,
) (io.Writer, error) { ) (io.Writer, error) {
session := msdrives.NewItemItemsItemCreateUploadSessionPostRequestBody() session := msdrives.NewItemItemsItemCreateUploadSessionPostRequestBody()
ctx = clues.Add(ctx, "upload_item_id", itemID)
r, err := service.Client().DrivesById(driveID).ItemsById(itemID).CreateUploadSession().Post(ctx, session, nil) r, err := service.Client().DrivesById(driveID).ItemsById(itemID).CreateUploadSession().Post(ctx, session, nil)
if err != nil { if err != nil {
return nil, errors.Wrapf( return nil, clues.Wrap(err, "creating item upload session").
err, WithClues(ctx).
"failed to create upload session for item %s. details: %s", With(graph.ErrData(err)...)
itemID,
support.ConnectorStackErrorTrace(err),
)
} }
url := *r.GetUploadUrl() logger.Ctx(ctx).Debug("created an upload session")
logger.Ctx(ctx).Debugf("Created an upload session for item %s. URL: %s", itemID, url) url := ptr.Val(r.GetUploadUrl())
return uploadsession.NewWriter(itemID, url, itemSize), nil return uploadsession.NewWriter(itemID, url, itemSize), nil
} }
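driveItemWriter hands back an io.Writer bound to the Graph upload-session URL. A usage sketch under the signatures shown here (uploadItem is illustrative, not part of this diff):

	// Sketch: stream restored item bytes through the session writer.
	func uploadItem(
		ctx context.Context,
		service graph.Servicer,
		driveID, itemID string,
		size int64,
		content io.Reader,
	) error {
		w, err := driveItemWriter(ctx, service, driveID, itemID, size)
		if err != nil {
			return err
		}

		// io.Copy drives the chunked upload via the session URL the
		// writer holds internally.
		if _, err := io.Copy(w, content); err != nil {
			return clues.Wrap(err, "uploading item content").WithClues(ctx)
		}

		return nil
	}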

View File

@ -15,6 +15,7 @@ import (
"github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/internal/common"
"github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/fault"
) )
type ItemIntegrationSuite struct { type ItemIntegrationSuite struct {
@ -107,6 +108,7 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
newPaths map[string]string, newPaths map[string]string,
excluded map[string]struct{}, excluded map[string]struct{},
doNotMergeItems bool, doNotMergeItems bool,
errs *fault.Bus,
) error { ) error {
for _, item := range items { for _, item := range items {
if item.GetFile() != nil { if item.GetFile() != nil {
@ -129,7 +131,7 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
itemCollector, itemCollector,
map[string]string{}, map[string]string{},
"", "",
) fault.New(true))
require.NoError(suite.T(), err) require.NoError(suite.T(), err)
// Test Requirement 2: Need a file // Test Requirement 2: Need a file

View File

@ -3,7 +3,6 @@ package onedrive
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"fmt"
"io" "io"
"runtime/trace" "runtime/trace"
"sort" "sort"
@ -14,6 +13,7 @@ import (
"github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
@ -21,6 +21,7 @@ import (
"github.com/alcionai/corso/src/internal/observe" "github.com/alcionai/corso/src/internal/observe"
"github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/path"
) )
@ -49,7 +50,7 @@ func getParentPermissions(
} }
if len(onedrivePath.Folders) != 0 { if len(onedrivePath.Folders) != 0 {
return nil, errors.Wrap(err, "unable to compute item permissions") return nil, errors.Wrap(err, "computing item permissions")
} }
parentPerms = []UserPermission{} parentPerms = []UserPermission{}
@ -69,7 +70,6 @@ func getParentAndCollectionPermissions(
} }
var ( var (
err error
parentPerms []UserPermission parentPerms []UserPermission
colPerms []UserPermission colPerms []UserPermission
) )
@ -89,7 +89,7 @@ func getParentAndCollectionPermissions(
// TODO(ashmrtn): For versions after this pull the permissions from the // TODO(ashmrtn): For versions after this pull the permissions from the
// current collection with Fetch(). // current collection with Fetch().
colPerms, err = getParentPermissions(collectionPath, permissions) colPerms, err := getParentPermissions(collectionPath, permissions)
if err != nil { if err != nil {
return nil, nil, clues.Wrap(err, "getting collection permissions") return nil, nil, clues.Wrap(err, "getting collection permissions")
} }
@ -106,22 +106,22 @@ func RestoreCollections(
opts control.Options, opts control.Options,
dcs []data.RestoreCollection, dcs []data.RestoreCollection,
deets *details.Builder, deets *details.Builder,
errs *fault.Bus,
) (*support.ConnectorOperationStatus, error) { ) (*support.ConnectorOperationStatus, error) {
var ( var (
restoreMetrics support.CollectionMetrics restoreMetrics support.CollectionMetrics
restoreErrors error
metrics support.CollectionMetrics metrics support.CollectionMetrics
folderPerms map[string][]UserPermission folderPerms map[string][]UserPermission
canceled bool
// permissionIDMappings is used to map between old and new id // permissionIDMappings is used to map between old and new id
// of permissions as we restore them // of permissions as we restore them
permissionIDMappings = map[string]string{} permissionIDMappings = map[string]string{}
) )
errUpdater := func(id string, err error) { ctx = clues.Add(
restoreErrors = support.WrapAndAppend(id, err, restoreErrors) ctx,
} "backup_version", backupVersion,
"destination", dest.ContainerName)
// Reorder collections so that the parents directories are created // Reorder collections so that the parents directories are created
// before the child directories // before the child directories
@ -129,12 +129,28 @@ func RestoreCollections(
return dcs[i].FullPath().String() < dcs[j].FullPath().String() return dcs[i].FullPath().String() < dcs[j].FullPath().String()
}) })
parentPermissions := map[string][]UserPermission{} var (
el = errs.Local()
parentPermissions = map[string][]UserPermission{}
)
// Iterate through the data collections and restore the contents of each // Iterate through the data collections and restore the contents of each
for _, dc := range dcs { for _, dc := range dcs {
metrics, folderPerms, permissionIDMappings, canceled = RestoreCollection( if el.Failure() != nil {
ctx, break
}
var (
err error
ictx = clues.Add(
ctx,
"resource_owner", dc.FullPath().ResourceOwner(), // TODO: pii
"category", dc.FullPath().Category(),
"path", dc.FullPath()) // TODO: pii
)
metrics, folderPerms, permissionIDMappings, err = RestoreCollection(
ictx,
backupVersion, backupVersion,
service, service,
dc, dc,
@ -142,10 +158,12 @@ func RestoreCollections(
OneDriveSource, OneDriveSource,
dest.ContainerName, dest.ContainerName,
deets, deets,
errUpdater,
permissionIDMappings, permissionIDMappings,
opts.RestorePermissions, opts.RestorePermissions,
) errs)
if err != nil {
el.AddRecoverable(err)
}
for k, v := range folderPerms { for k, v := range folderPerms {
parentPermissions[k] = v parentPermissions[k] = v
@ -153,19 +171,20 @@ func RestoreCollections(
restoreMetrics.Combine(metrics) restoreMetrics.Combine(metrics)
if canceled { if errors.Is(err, context.Canceled) {
break break
} }
} }
return support.CreateStatus( status := support.CreateStatus(
ctx, ctx,
support.Restore, support.Restore,
len(dcs), len(dcs),
restoreMetrics, restoreMetrics,
restoreErrors, el.Failure(),
dest.ContainerName), dest.ContainerName)
nil
return status, el.Failure()
} }
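RestoreCollections now follows the same contract as the backup side: each collection's restore may report a recoverable failure, the loop stops early on escalation or cancellation, and the final status carries el.Failure(). A condensed sketch of that loop shape (restoreOne is a hypothetical stand-in for the RestoreCollection call):

	// Sketch: per-collection restore with recoverable error aggregation.
	func restoreAll(ctx context.Context, dcs []data.RestoreCollection, errs *fault.Bus) error {
		el := errs.Local()

		for _, dc := range dcs {
			if el.Failure() != nil {
				break
			}

			ictx := clues.Add(ctx, "path", dc.FullPath()) // TODO: pii

			if err := restoreOne(ictx, dc); err != nil {
				el.AddRecoverable(err)

				if errors.Is(err, context.Canceled) {
					break // no point continuing once the ctx is gone
				}
			}
		}

		return el.Failure()
	}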
// RestoreCollection handles restoration of an individual collection. // RestoreCollection handles restoration of an individual collection.
@ -181,10 +200,10 @@ func RestoreCollection(
source driveSource, source driveSource,
restoreContainerName string, restoreContainerName string,
deets *details.Builder, deets *details.Builder,
errUpdater func(string, error),
permissionIDMappings map[string]string, permissionIDMappings map[string]string,
restorePerms bool, restorePerms bool,
) (support.CollectionMetrics, map[string][]UserPermission, map[string]string, bool) { errs *fault.Bus,
) (support.CollectionMetrics, map[string][]UserPermission, map[string]string, error) {
ctx, end := D.Span(ctx, "gc:oneDrive:restoreCollection", D.Label("path", dc.FullPath())) ctx, end := D.Span(ctx, "gc:oneDrive:restoreCollection", D.Label("path", dc.FullPath()))
defer end() defer end()
@ -199,8 +218,7 @@ func RestoreCollection(
drivePath, err := path.ToOneDrivePath(directory) drivePath, err := path.ToOneDrivePath(directory)
if err != nil { if err != nil {
errUpdater(directory.String(), err) return metrics, folderPerms, permissionIDMappings, clues.Wrap(err, "creating drive path").WithClues(ctx)
return metrics, folderPerms, permissionIDMappings, false
} }
// Assemble folder hierarchy we're going to restore into (we recreate the folder hierarchy // Assemble folder hierarchy we're going to restore into (we recreate the folder hierarchy
@ -210,11 +228,13 @@ func RestoreCollection(
restoreFolderElements := []string{restoreContainerName} restoreFolderElements := []string{restoreContainerName}
restoreFolderElements = append(restoreFolderElements, drivePath.Folders...) restoreFolderElements = append(restoreFolderElements, drivePath.Folders...)
+	ctx = clues.Add(
+		ctx,
+		"destination_elements", restoreFolderElements,
+		"drive_id", drivePath.DriveID)

	trace.Log(ctx, "gc:oneDrive:restoreCollection", directory.String())

-	logger.Ctx(ctx).Infow(
-		"restoring to destination",
-		"origin", dc.FullPath().Folder(false),
-		"destination", restoreFolderElements)
+	logger.Ctx(ctx).Info("restoring onedrive collection")
parentPerms, colPerms, err := getParentAndCollectionPermissions( parentPerms, colPerms, err := getParentAndCollectionPermissions(
drivePath, drivePath,
@ -222,8 +242,7 @@ func RestoreCollection(
parentPermissions, parentPermissions,
restorePerms) restorePerms)
if err != nil { if err != nil {
-		errUpdater(directory.String(), err)
-		return metrics, folderPerms, permissionIDMappings, false
+		return metrics, folderPerms, permissionIDMappings, clues.Wrap(err, "getting permissions").WithClues(ctx)
} }
// Create restore folders and get the folder ID of the folder the data stream will be restored in // Create restore folders and get the folder ID of the folder the data stream will be restored in
@ -237,35 +256,37 @@ func RestoreCollection(
permissionIDMappings, permissionIDMappings,
) )
if err != nil { if err != nil {
-		errUpdater(directory.String(), errors.Wrapf(err, "failed to create folders %v", restoreFolderElements))
-		return metrics, folderPerms, permissionIDMappings, false
+		return metrics, folderPerms, permissionIDMappings, clues.Wrap(err, "creating folders for restore")
} }
-	// Restore items from the collection
-	items := dc.Items(ctx, nil) // TODO: fault.Errors instead of nil
+	var (
+		el    = errs.Local()
+		items = dc.Items(ctx, errs)
+	)

	for {
+		if el.Failure() != nil {
+			break
+		}
select { select {
case <-ctx.Done(): case <-ctx.Done():
-			errUpdater("context canceled", ctx.Err())
-			return metrics, folderPerms, permissionIDMappings, true
+			return metrics, folderPerms, permissionIDMappings, err
case itemData, ok := <-items: case itemData, ok := <-items:
if !ok { if !ok {
return metrics, folderPerms, permissionIDMappings, false return metrics, folderPerms, permissionIDMappings, nil
} }
itemPath, err := dc.FullPath().Append(itemData.UUID(), true) itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
if err != nil { if err != nil {
-				logger.Ctx(ctx).DPanicw("transforming item to full path", "error", err)
-				errUpdater(itemData.UUID(), err)
+				el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ctx))
continue continue
} }
if source == OneDriveSource && backupVersion >= versionWithDataAndMetaFiles { if source == OneDriveSource && backupVersion >= versionWithDataAndMetaFiles {
name := itemData.UUID() name := itemData.UUID()
if strings.HasSuffix(name, DataFileSuffix) { if strings.HasSuffix(name, DataFileSuffix) {
metrics.Objects++ metrics.Objects++
metrics.TotalBytes += int64(len(copyBuffer)) metrics.TotalBytes += int64(len(copyBuffer))
@ -281,7 +302,7 @@ func RestoreCollection(
copyBuffer, copyBuffer,
source) source)
if err != nil { if err != nil {
errUpdater(itemData.UUID(), err) el.AddRecoverable(err)
continue continue
} }
@ -305,7 +326,7 @@ func RestoreCollection(
permsFile, err := dc.Fetch(ctx, metaName) permsFile, err := dc.Fetch(ctx, metaName)
if err != nil { if err != nil {
errUpdater(metaName, clues.Wrap(err, "getting item metadata")) el.AddRecoverable(clues.Wrap(err, "getting item metadata"))
continue continue
} }
@ -314,7 +335,7 @@ func RestoreCollection(
metaReader.Close() metaReader.Close()
if err != nil { if err != nil {
errUpdater(metaName, clues.Wrap(err, "deserializing item metadata")) el.AddRecoverable(clues.Wrap(err, "deserializing item metadata"))
continue continue
} }
@ -325,10 +346,9 @@ func RestoreCollection(
itemID, itemID,
colPerms, colPerms,
meta.Permissions, meta.Permissions,
-					permissionIDMappings,
-				)
+					permissionIDMappings)
if err != nil { if err != nil {
errUpdater(trimmedName, clues.Wrap(err, "restoring item permissions")) el.AddRecoverable(clues.Wrap(err, "restoring item permissions"))
continue continue
} }
@ -344,28 +364,25 @@ func RestoreCollection(
} }
metaReader := itemData.ToReader() metaReader := itemData.ToReader()
-				meta, err := getMetadata(metaReader)
-				metaReader.Close()
+				defer metaReader.Close()
+
+				meta, err := getMetadata(metaReader)
				if err != nil {
-					errUpdater(itemData.UUID(), clues.Wrap(err, "folder metadata"))
+					el.AddRecoverable(clues.Wrap(err, "getting directory metadata").WithClues(ctx))
continue continue
} }
trimmedPath := strings.TrimSuffix(itemPath.String(), DirMetaFileSuffix) trimmedPath := strings.TrimSuffix(itemPath.String(), DirMetaFileSuffix)
folderPerms[trimmedPath] = meta.Permissions folderPerms[trimmedPath] = meta.Permissions
-			} else {
-				if !ok {
-					errUpdater(itemData.UUID(), fmt.Errorf("invalid backup format, you might be using an old backup"))
-					continue
-				}
} }
} else { } else {
metrics.Objects++ metrics.Objects++
metrics.TotalBytes += int64(len(copyBuffer)) metrics.TotalBytes += int64(len(copyBuffer))
// No permissions stored at the moment for SharePoint // No permissions stored at the moment for SharePoint
-				_, itemInfo, err = restoreData(ctx,
+				_, itemInfo, err = restoreData(
+					ctx,
service, service,
itemData.UUID(), itemData.UUID(),
itemData, itemData,
@ -374,7 +391,7 @@ func RestoreCollection(
copyBuffer, copyBuffer,
source) source)
if err != nil { if err != nil {
errUpdater(itemData.UUID(), err) el.AddRecoverable(err)
continue continue
} }
@ -389,6 +406,8 @@ func RestoreCollection(
} }
} }
} }
return metrics, folderPerms, permissionIDMappings, el.Failure()
} }
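
The restore loop drains the items channel through a select so cancellation is observed between items, and the producer closing the channel signals normal completion. A compact sketch of that drain pattern (stream is a stand-in for data.Stream, which carries readers and metadata in the real code):

package main

import (
	"context"
	"fmt"
	"time"
)

type stream struct{ id string }

// drain mirrors the loop in RestoreCollection: exit on cancellation,
// exit when the producer closes the channel, otherwise handle items.
func drain(ctx context.Context, items <-chan stream) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err() // canceled or deadline exceeded
		case it, ok := <-items:
			if !ok {
				return nil // producer closed the channel: done
			}

			fmt.Println("restoring", it.id)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	items := make(chan stream)

	go func() {
		defer close(items)

		for _, id := range []string{"a", "b"} {
			items <- stream{id: id}
		}
	}()

	fmt.Println(drain(ctx, items))
}
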
// createRestoreFoldersWithPermissions creates the restore folder hierarchy in // createRestoreFoldersWithPermissions creates the restore folder hierarchy in
@ -431,42 +450,31 @@ func CreateRestoreFolders(
) (string, error) { ) (string, error) {
driveRoot, err := service.Client().DrivesById(driveID).Root().Get(ctx, nil) driveRoot, err := service.Client().DrivesById(driveID).Root().Get(ctx, nil)
if err != nil { if err != nil {
-		return "", errors.Wrapf(
-			err,
-			"failed to get drive root. details: %s",
-			support.ConnectorStackErrorTrace(err),
-		)
+		return "", clues.Wrap(err, "getting drive root").WithClues(ctx).With(graph.ErrData(err)...)
} }
-	logger.Ctx(ctx).Debugf("Found Root for Drive %s with ID %s", driveID, *driveRoot.GetId())
-
-	parentFolderID := *driveRoot.GetId()
+	parentFolderID := ptr.Val(driveRoot.GetId())
+	ctx = clues.Add(ctx, "drive_root_id", parentFolderID)
+
+	logger.Ctx(ctx).Debug("found drive root")
for _, folder := range restoreFolders { for _, folder := range restoreFolders {
folderItem, err := getFolder(ctx, service, driveID, parentFolderID, folder) folderItem, err := getFolder(ctx, service, driveID, parentFolderID, folder)
if err == nil { if err == nil {
-			parentFolderID = *folderItem.GetId()
-			logger.Ctx(ctx).Debugf("Found %s with ID %s", folder, parentFolderID)
+			parentFolderID = ptr.Val(folderItem.GetId())
continue continue
} }
if !errors.Is(err, errFolderNotFound) { if errors.Is(err, errFolderNotFound) {
return "", errors.Wrapf(err, "folder %s not found in drive(%s) parentFolder(%s)", folder, driveID, parentFolderID) return "", clues.Wrap(err, "folder not found").With("folder_id", folder).WithClues(ctx)
} }
folderItem, err = createItem(ctx, service, driveID, parentFolderID, newItem(folder, true)) folderItem, err = createItem(ctx, service, driveID, parentFolderID, newItem(folder, true))
if err != nil { if err != nil {
-			return "", errors.Wrapf(
-				err,
-				"failed to create folder %s/%s. details: %s", parentFolderID, folder,
-				support.ConnectorStackErrorTrace(err),
-			)
+			return "", clues.Wrap(err, "creating folder")
} }
-		logger.Ctx(ctx).Debugw("resolved restore destination",
-			"dest_name", folder,
-			"parent", parentFolderID,
-			"dest_id", *folderItem.GetId())
+		logger.Ctx(ctx).Debugw("resolved restore destination", "dest_id", *folderItem.GetId())
parentFolderID = *folderItem.GetId() parentFolderID = *folderItem.GetId()
} }
@ -487,25 +495,27 @@ func restoreData(
ctx, end := D.Span(ctx, "gc:oneDrive:restoreItem", D.Label("item_uuid", itemData.UUID())) ctx, end := D.Span(ctx, "gc:oneDrive:restoreItem", D.Label("item_uuid", itemData.UUID()))
defer end() defer end()
ctx = clues.Add(ctx, "item_name", itemData.UUID())
itemName := itemData.UUID() itemName := itemData.UUID()
trace.Log(ctx, "gc:oneDrive:restoreItem", itemName) trace.Log(ctx, "gc:oneDrive:restoreItem", itemName)
// Get the stream size (needed to create the upload session) // Get the stream size (needed to create the upload session)
ss, ok := itemData.(data.StreamSize) ss, ok := itemData.(data.StreamSize)
if !ok { if !ok {
return "", details.ItemInfo{}, errors.Errorf("item %q does not implement DataStreamInfo", itemName) return "", details.ItemInfo{}, clues.New("item does not implement DataStreamInfo").WithClues(ctx)
} }
// Create Item // Create Item
newItem, err := createItem(ctx, service, driveID, parentFolderID, newItem(name, false)) newItem, err := createItem(ctx, service, driveID, parentFolderID, newItem(name, false))
if err != nil { if err != nil {
return "", details.ItemInfo{}, errors.Wrapf(err, "failed to create item %s", itemName) return "", details.ItemInfo{}, clues.Wrap(err, "creating item")
} }
// Get a drive item writer // Get a drive item writer
w, err := driveItemWriter(ctx, service, driveID, *newItem.GetId(), ss.Size()) w, err := driveItemWriter(ctx, service, driveID, *newItem.GetId(), ss.Size())
if err != nil { if err != nil {
return "", details.ItemInfo{}, errors.Wrapf(err, "failed to create item upload session %s", itemName) return "", details.ItemInfo{}, clues.Wrap(err, "creating item writer")
} }
iReader := itemData.ToReader() iReader := itemData.ToReader()
@ -516,7 +526,7 @@ func restoreData(
// Upload the stream data // Upload the stream data
written, err := io.CopyBuffer(w, progReader, copyBuffer) written, err := io.CopyBuffer(w, progReader, copyBuffer)
if err != nil { if err != nil {
return "", details.ItemInfo{}, errors.Wrapf(err, "failed to upload data: item %s", itemName) return "", details.ItemInfo{}, clues.Wrap(err, "writing item bytes").WithClues(ctx).With(graph.ErrData(err)...)
} }
dii := details.ItemInfo{} dii := details.ItemInfo{}
@ -607,16 +617,16 @@ func restorePermissions(
) (map[string]string, error) { ) (map[string]string, error) {
permAdded, permRemoved := getChildPermissions(childPerms, parentPerms) permAdded, permRemoved := getChildPermissions(childPerms, parentPerms)
ctx = clues.Add(ctx, "permission_item_id", itemID)
for _, p := range permRemoved { for _, p := range permRemoved {
-		err := service.Client().DrivesById(driveID).ItemsById(itemID).
-			PermissionsById(permissionIDMappings[p.ID]).Delete(ctx, nil)
+		err := service.Client().
+			DrivesById(driveID).
+			ItemsById(itemID).
+			PermissionsById(permissionIDMappings[p.ID]).
+			Delete(ctx, nil)
if err != nil { if err != nil {
-			return permissionIDMappings, errors.Wrapf(
-				err,
-				"failed to remove permission for item %s. details: %s",
-				itemID,
-				support.ConnectorStackErrorTrace(err),
-			)
+			return permissionIDMappings, clues.Wrap(err, "removing permissions").WithClues(ctx).With(graph.ErrData(err)...)
} }
} }
@ -641,12 +651,7 @@ func restorePermissions(
np, err := service.Client().DrivesById(driveID).ItemsById(itemID).Invite().Post(ctx, pbody, nil) np, err := service.Client().DrivesById(driveID).ItemsById(itemID).Invite().Post(ctx, pbody, nil)
if err != nil { if err != nil {
-			return permissionIDMappings, errors.Wrapf(
-				err,
-				"failed to set permission for item %s. details: %s",
-				itemID,
-				support.ConnectorStackErrorTrace(err),
-			)
+			return permissionIDMappings, clues.Wrap(err, "setting permissions").WithClues(ctx).With(graph.ErrData(err)...)
} }
permissionIDMappings[p.ID] = *np.GetValue()[0].GetId() permissionIDMappings[p.ID] = *np.GetValue()[0].GetId()
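
restorePermissions applies a diff of the child's permissions against the parent's: removed entries are deleted, added ones re-invited, and old-to-new IDs tracked in permissionIDMappings. A sketch of the set-difference step, assuming permissions compare by ID (getChildPermissions' actual comparison may consider more fields):

package main

import "fmt"

type userPermission struct {
	ID    string
	Roles []string
}

// diffPermissions returns entries present in child but not parent (to
// add) and entries present in parent but not child (to remove),
// comparing by ID only.
func diffPermissions(child, parent []userPermission) (added, removed []userPermission) {
	parentIDs := make(map[string]struct{}, len(parent))
	for _, p := range parent {
		parentIDs[p.ID] = struct{}{}
	}

	childIDs := make(map[string]struct{}, len(child))
	for _, c := range child {
		childIDs[c.ID] = struct{}{}

		if _, ok := parentIDs[c.ID]; !ok {
			added = append(added, c)
		}
	}

	for _, p := range parent {
		if _, ok := childIDs[p.ID]; !ok {
			removed = append(removed, p)
		}
	}

	return added, removed
}

func main() {
	child := []userPermission{{ID: "1"}, {ID: "3"}}
	parent := []userPermission{{ID: "1"}, {ID: "2"}}

	a, r := diffPermissions(child, parent)
	fmt.Println(len(a), len(r)) // 1 1
}
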


@ -28,13 +28,13 @@ func GetSitePages(
serv *discover.BetaService, serv *discover.BetaService,
siteID string, siteID string,
pages []string, pages []string,
errs *fault.Errors, errs *fault.Bus,
) ([]models.SitePageable, error) { ) ([]models.SitePageable, error) {
var ( var (
col = make([]models.SitePageable, 0) col = make([]models.SitePageable, 0)
semaphoreCh = make(chan struct{}, fetchChannelSize) semaphoreCh = make(chan struct{}, fetchChannelSize)
opts = retrieveSitePageOptions() opts = retrieveSitePageOptions()
err error el = errs.Local()
wg sync.WaitGroup wg sync.WaitGroup
m sync.Mutex m sync.Mutex
) )
@ -49,7 +49,7 @@ func GetSitePages(
} }
for _, entry := range pages { for _, entry := range pages {
if errs.Err() != nil { if el.Failure() != nil {
break break
} }
@ -61,11 +61,14 @@ func GetSitePages(
defer wg.Done() defer wg.Done()
defer func() { <-semaphoreCh }() defer func() { <-semaphoreCh }()
-			var page models.SitePageable
+			var (
+				page models.SitePageable
+				err  error
+			)
page, err = serv.Client().SitesById(siteID).PagesById(pageID).Get(ctx, opts) page, err = serv.Client().SitesById(siteID).PagesById(pageID).Get(ctx, opts)
if err != nil { if err != nil {
errs.Add(clues.Wrap(err, "fetching page").WithClues(ctx).With(graph.ErrData(err)...)) el.AddRecoverable(clues.Wrap(err, "fetching page").WithClues(ctx).With(graph.ErrData(err)...))
return return
} }
@ -75,7 +78,7 @@ func GetSitePages(
wg.Wait() wg.Wait()
return col, errs.Err() return col, el.Failure()
} }
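
GetSitePages bounds its concurrent fetches with a buffered channel used as a semaphore (fetchChannelSize above) and guards the shared slice with a mutex. The same shape in isolation (fetch stands in for the Graph call; a plain error slice replaces the bus to keep the sketch short):

package main

import (
	"fmt"
	"sync"
)

const fetchLimit = 4 // counterpart of fetchChannelSize

func fetchAll(ids []string, fetch func(string) (string, error)) ([]string, []error) {
	var (
		sem     = make(chan struct{}, fetchLimit) // at most fetchLimit in flight
		wg      sync.WaitGroup
		mu      sync.Mutex
		results []string
		errs    []error
	)

	for _, id := range ids {
		sem <- struct{}{} // acquire a slot
		wg.Add(1)

		go func(id string) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot

			v, err := fetch(id)

			mu.Lock()
			defer mu.Unlock()

			if err != nil {
				errs = append(errs, fmt.Errorf("fetching %s: %w", id, err))
				return
			}

			results = append(results, v)
		}(id)
	}

	wg.Wait()

	return results, errs
}

func main() {
	got, errs := fetchAll([]string{"p1", "p2", "p3"}, func(id string) (string, error) {
		return "page:" + id, nil
	})
	fmt.Println(got, errs)
}
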
// fetchPages utility function to return the tuple of item // fetchPages utility function to return the tuple of item


@ -112,7 +112,7 @@ func (sc Collection) DoNotMergeItems() bool {
func (sc *Collection) Items( func (sc *Collection) Items(
ctx context.Context, ctx context.Context,
errs *fault.Errors, errs *fault.Bus,
) <-chan data.Stream { ) <-chan data.Stream {
go sc.populate(ctx, errs) go sc.populate(ctx, errs)
return sc.data return sc.data
@ -183,7 +183,7 @@ func (sc *Collection) finishPopulation(
} }
// populate utility function to retrieve data from back store for a given collection // populate utility function to retrieve data from back store for a given collection
func (sc *Collection) populate(ctx context.Context, errs *fault.Errors) { func (sc *Collection) populate(ctx context.Context, errs *fault.Bus) {
var ( var (
metrics numMetrics metrics numMetrics
writer = kw.NewJsonSerializationWriter() writer = kw.NewJsonSerializationWriter()
@ -221,9 +221,12 @@ func (sc *Collection) retrieveLists(
ctx context.Context, ctx context.Context,
wtr *kw.JsonSerializationWriter, wtr *kw.JsonSerializationWriter,
progress chan<- struct{}, progress chan<- struct{},
errs *fault.Errors, errs *fault.Bus,
) (numMetrics, error) { ) (numMetrics, error) {
-	var metrics numMetrics
+	var (
+		metrics numMetrics
+		el      = errs.Local()
+	)
lists, err := loadSiteLists(ctx, sc.service, sc.fullPath.ResourceOwner(), sc.jobs, errs) lists, err := loadSiteLists(ctx, sc.service, sc.fullPath.ResourceOwner(), sc.jobs, errs)
if err != nil { if err != nil {
@ -234,13 +237,13 @@ func (sc *Collection) retrieveLists(
	// Each models.Listable is serialized and its metrics are collected.	// Each models.Listable is serialized and its metrics are collected.
	// Progress is reported via the passed-in channel.	// Progress is reported via the passed-in channel.
for _, lst := range lists { for _, lst := range lists {
if errs.Err() != nil { if el.Failure() != nil {
break break
} }
byteArray, err := serializeContent(wtr, lst) byteArray, err := serializeContent(wtr, lst)
if err != nil { if err != nil {
errs.Add(clues.Wrap(err, "serializing list").WithClues(ctx)) el.AddRecoverable(clues.Wrap(err, "serializing list").WithClues(ctx))
continue continue
} }
@ -266,16 +269,19 @@ func (sc *Collection) retrieveLists(
} }
} }
return metrics, errs.Err() return metrics, el.Failure()
} }
func (sc *Collection) retrievePages( func (sc *Collection) retrievePages(
ctx context.Context, ctx context.Context,
wtr *kw.JsonSerializationWriter, wtr *kw.JsonSerializationWriter,
progress chan<- struct{}, progress chan<- struct{},
errs *fault.Errors, errs *fault.Bus,
) (numMetrics, error) { ) (numMetrics, error) {
-	var metrics numMetrics
+	var (
+		metrics numMetrics
+		el      = errs.Local()
+	)
betaService := sc.betaService betaService := sc.betaService
if betaService == nil { if betaService == nil {
@ -292,13 +298,13 @@ func (sc *Collection) retrievePages(
// Pageable objects are not supported in v1.0 of msgraph at this time. // Pageable objects are not supported in v1.0 of msgraph at this time.
// TODO: Verify Parsable interface supported with modified-Pageable // TODO: Verify Parsable interface supported with modified-Pageable
for _, pg := range pages { for _, pg := range pages {
if errs.Err() != nil { if el.Failure() != nil {
break break
} }
byteArray, err := serializeContent(wtr, pg) byteArray, err := serializeContent(wtr, pg)
if err != nil { if err != nil {
errs.Add(clues.Wrap(err, "serializing page").WithClues(ctx)) el.AddRecoverable(clues.Wrap(err, "serializing page").WithClues(ctx))
continue continue
} }
@ -318,7 +324,7 @@ func (sc *Collection) retrievePages(
} }
} }
return metrics, errs.Err() return metrics, el.Failure()
} }
func serializeContent(writer *kw.JsonSerializationWriter, obj absser.Parsable) ([]byte, error) { func serializeContent(writer *kw.JsonSerializationWriter, obj absser.Parsable) ([]byte, error) {


@ -36,7 +36,7 @@ func DataCollections(
serv graph.Servicer, serv graph.Servicer,
su statusUpdater, su statusUpdater,
ctrlOpts control.Options, ctrlOpts control.Options,
errs *fault.Errors, errs *fault.Bus,
) ([]data.BackupCollection, map[string]struct{}, error) { ) ([]data.BackupCollection, map[string]struct{}, error) {
b, err := selector.ToSharePointBackup() b, err := selector.ToSharePointBackup()
if err != nil { if err != nil {
@ -44,12 +44,13 @@ func DataCollections(
} }
var ( var (
el = errs.Local()
site = b.DiscreteOwner site = b.DiscreteOwner
collections = []data.BackupCollection{} collections = []data.BackupCollection{}
) )
for _, scope := range b.Scopes() { for _, scope := range b.Scopes() {
if errs.Err() != nil { if el.Failure() != nil {
break break
} }
@ -73,7 +74,7 @@ func DataCollections(
ctrlOpts, ctrlOpts,
errs) errs)
if err != nil { if err != nil {
errs.Add(err) el.AddRecoverable(err)
continue continue
} }
@ -86,9 +87,10 @@ func DataCollections(
site, site,
scope, scope,
su, su,
-				ctrlOpts)
+				ctrlOpts,
+				errs)
if err != nil { if err != nil {
errs.Add(err) el.AddRecoverable(err)
continue continue
} }
@ -102,7 +104,7 @@ func DataCollections(
ctrlOpts, ctrlOpts,
errs) errs)
if err != nil { if err != nil {
errs.Add(err) el.AddRecoverable(err)
continue continue
} }
} }
@ -111,7 +113,7 @@ func DataCollections(
foldersComplete <- struct{}{} foldersComplete <- struct{}{}
} }
return collections, nil, errs.Err() return collections, nil, el.Failure()
} }
func collectLists( func collectLists(
@ -120,11 +122,14 @@ func collectLists(
tenantID, siteID string, tenantID, siteID string,
updater statusUpdater, updater statusUpdater,
ctrlOpts control.Options, ctrlOpts control.Options,
errs *fault.Errors, errs *fault.Bus,
) ([]data.BackupCollection, error) { ) ([]data.BackupCollection, error) {
logger.Ctx(ctx).With("site", siteID).Debug("Creating SharePoint List Collections") logger.Ctx(ctx).With("site", siteID).Debug("Creating SharePoint List Collections")
-	spcs := make([]data.BackupCollection, 0)
+	var (
+		el   = errs.Local()
+		spcs = make([]data.BackupCollection, 0)
+	)
lists, err := preFetchLists(ctx, serv, siteID) lists, err := preFetchLists(ctx, serv, siteID)
if err != nil { if err != nil {
@ -132,7 +137,7 @@ func collectLists(
} }
for _, tuple := range lists { for _, tuple := range lists {
if errs.Err() != nil { if el.Failure() != nil {
break break
} }
@ -143,7 +148,7 @@ func collectLists(
path.ListsCategory, path.ListsCategory,
false) false)
if err != nil { if err != nil {
errs.Add(clues.Wrap(err, "creating list collection path").WithClues(ctx)) el.AddRecoverable(clues.Wrap(err, "creating list collection path").WithClues(ctx))
} }
collection := NewCollection(dir, serv, List, updater.UpdateStatus, ctrlOpts) collection := NewCollection(dir, serv, List, updater.UpdateStatus, ctrlOpts)
@ -152,7 +157,7 @@ func collectLists(
spcs = append(spcs, collection) spcs = append(spcs, collection)
} }
return spcs, errs.Err() return spcs, el.Failure()
} }
// collectLibraries constructs a onedrive Collections struct and Get()s // collectLibraries constructs a onedrive Collections struct and Get()s
@ -165,6 +170,7 @@ func collectLibraries(
scope selectors.SharePointScope, scope selectors.SharePointScope,
updater statusUpdater, updater statusUpdater,
ctrlOpts control.Options, ctrlOpts control.Options,
errs *fault.Bus,
) ([]data.BackupCollection, map[string]struct{}, error) { ) ([]data.BackupCollection, map[string]struct{}, error) {
logger.Ctx(ctx).Debug("creating SharePoint Library collections") logger.Ctx(ctx).Debug("creating SharePoint Library collections")
@ -183,7 +189,7 @@ func collectLibraries(
// TODO(ashmrtn): Pass previous backup metadata when SharePoint supports delta // TODO(ashmrtn): Pass previous backup metadata when SharePoint supports delta
// token-based incrementals. // token-based incrementals.
odcs, excludes, err := colls.Get(ctx, nil) odcs, excludes, err := colls.Get(ctx, nil, errs)
if err != nil { if err != nil {
return nil, nil, clues.Wrap(err, "getting library").WithClues(ctx).With(graph.ErrData(err)...) return nil, nil, clues.Wrap(err, "getting library").WithClues(ctx).With(graph.ErrData(err)...)
} }
@ -200,11 +206,14 @@ func collectPages(
siteID string, siteID string,
updater statusUpdater, updater statusUpdater,
ctrlOpts control.Options, ctrlOpts control.Options,
errs *fault.Errors, errs *fault.Bus,
) ([]data.BackupCollection, error) { ) ([]data.BackupCollection, error) {
logger.Ctx(ctx).Debug("creating SharePoint Pages collections") logger.Ctx(ctx).Debug("creating SharePoint Pages collections")
-	spcs := make([]data.BackupCollection, 0)
+	var (
+		el   = errs.Local()
+		spcs = make([]data.BackupCollection, 0)
+	)
// make the betaClient // make the betaClient
// Need to receive From DataCollection Call // Need to receive From DataCollection Call
@ -221,7 +230,7 @@ func collectPages(
} }
for _, tuple := range tuples { for _, tuple := range tuples {
if errs.Err() != nil { if el.Failure() != nil {
break break
} }
@ -232,7 +241,7 @@ func collectPages(
path.PagesCategory, path.PagesCategory,
false) false)
if err != nil { if err != nil {
errs.Add(clues.Wrap(err, "creating page collection path").WithClues(ctx)) el.AddRecoverable(clues.Wrap(err, "creating page collection path").WithClues(ctx))
} }
collection := NewCollection(dir, serv, Pages, updater.UpdateStatus, ctrlOpts) collection := NewCollection(dir, serv, Pages, updater.UpdateStatus, ctrlOpts)
@ -242,7 +251,7 @@ func collectPages(
spcs = append(spcs, collection) spcs = append(spcs, collection)
} }
return spcs, errs.Err() return spcs, el.Failure()
} }
type folderMatcher struct { type folderMatcher struct {


@ -105,7 +105,7 @@ func (suite *SharePointLibrariesSuite) TestUpdateCollections() {
&MockGraphService{}, &MockGraphService{},
nil, nil,
control.Options{}) control.Options{})
err := c.UpdateCollections(ctx, "driveID1", "General", test.items, paths, newPaths, excluded, true) err := c.UpdateCollections(ctx, "driveID1", "General", test.items, paths, newPaths, excluded, true, fault.New(true))
test.expect(t, err) test.expect(t, err)
assert.Equal(t, len(test.expectedCollectionIDs), len(c.CollectionMap), "collection paths") assert.Equal(t, len(test.expectedCollectionIDs), len(c.CollectionMap), "collection paths")
assert.Equal(t, test.expectedItemCount, c.NumItems, "item count") assert.Equal(t, test.expectedItemCount, c.NumItems, "item count")


@ -92,11 +92,12 @@ func loadSiteLists(
gs graph.Servicer, gs graph.Servicer,
siteID string, siteID string,
listIDs []string, listIDs []string,
errs *fault.Errors, errs *fault.Bus,
) ([]models.Listable, error) { ) ([]models.Listable, error) {
var ( var (
results = make([]models.Listable, 0) results = make([]models.Listable, 0)
semaphoreCh = make(chan struct{}, fetchChannelSize) semaphoreCh = make(chan struct{}, fetchChannelSize)
el = errs.Local()
wg sync.WaitGroup wg sync.WaitGroup
m sync.Mutex m sync.Mutex
) )
@ -111,8 +112,8 @@ func loadSiteLists(
} }
for _, listID := range listIDs { for _, listID := range listIDs {
if errs.Err() != nil { if el.Failure() != nil {
return nil, errs.Err() break
} }
semaphoreCh <- struct{}{} semaphoreCh <- struct{}{}
@ -130,13 +131,13 @@ func loadSiteLists(
entry, err = gs.Client().SitesById(siteID).ListsById(id).Get(ctx, nil) entry, err = gs.Client().SitesById(siteID).ListsById(id).Get(ctx, nil)
if err != nil { if err != nil {
errs.Add(clues.Wrap(err, "getting site list").WithClues(ctx).With(graph.ErrData(err)...)) el.AddRecoverable(clues.Wrap(err, "getting site list").WithClues(ctx).With(graph.ErrData(err)...))
return return
} }
cols, cTypes, lItems, err := fetchListContents(ctx, gs, siteID, id, errs) cols, cTypes, lItems, err := fetchListContents(ctx, gs, siteID, id, errs)
if err != nil { if err != nil {
errs.Add(clues.Wrap(err, "getting list contents")) el.AddRecoverable(clues.Wrap(err, "getting list contents"))
return return
} }
@ -149,7 +150,7 @@ func loadSiteLists(
wg.Wait() wg.Wait()
return results, errs.Err() return results, el.Failure()
} }
// fetchListContents utility function to retrieve associated M365 relationships // fetchListContents utility function to retrieve associated M365 relationships
@ -159,7 +160,7 @@ func fetchListContents(
ctx context.Context, ctx context.Context,
service graph.Servicer, service graph.Servicer,
siteID, listID string, siteID, listID string,
errs *fault.Errors, errs *fault.Bus,
) ( ) (
[]models.ColumnDefinitionable, []models.ColumnDefinitionable,
[]models.ContentTypeable, []models.ContentTypeable,
@ -192,16 +193,17 @@ func fetchListItems(
ctx context.Context, ctx context.Context,
gs graph.Servicer, gs graph.Servicer,
siteID, listID string, siteID, listID string,
errs *fault.Errors, errs *fault.Bus,
) ([]models.ListItemable, error) { ) ([]models.ListItemable, error) {
var ( var (
prefix = gs.Client().SitesById(siteID).ListsById(listID) prefix = gs.Client().SitesById(siteID).ListsById(listID)
builder = prefix.Items() builder = prefix.Items()
itms = make([]models.ListItemable, 0) itms = make([]models.ListItemable, 0)
el = errs.Local()
) )
for { for {
if errs.Err() != nil { if errs.Failure() != nil {
break break
} }
@ -211,7 +213,7 @@ func fetchListItems(
} }
for _, itm := range resp.GetValue() { for _, itm := range resp.GetValue() {
if errs.Err() != nil { if el.Failure() != nil {
break break
} }
@ -219,7 +221,7 @@ func fetchListItems(
fields, err := newPrefix.Fields().Get(ctx, nil) fields, err := newPrefix.Fields().Get(ctx, nil)
if err != nil { if err != nil {
errs.Add(clues.Wrap(err, "getting list fields").WithClues(ctx).With(graph.ErrData(err)...)) el.AddRecoverable(clues.Wrap(err, "getting list fields").WithClues(ctx).With(graph.ErrData(err)...))
continue continue
} }
@ -235,7 +237,7 @@ func fetchListItems(
builder = mssite.NewItemListsItemItemsRequestBuilder(*resp.GetOdataNextLink(), gs.Adapter()) builder = mssite.NewItemListsItemItemsRequestBuilder(*resp.GetOdataNextLink(), gs.Adapter())
} }
return itms, errs.Err() return itms, el.Failure()
} }
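
fetchListItems and fetchContentTypes both follow the OdataNextLink until the service stops returning one, rebuilding the request builder from the link each round. The generic shape of that loop, with the Graph builder replaced by a plain fetch function returning (values, nextLink):

package main

import "fmt"

type page struct {
	values   []string
	nextLink string // empty when there are no more pages
}

// collectPages follows nextLink until it is empty, mirroring the
// OdataNextLink loops above.
func collectPages(first string, get func(link string) (page, error)) ([]string, error) {
	var (
		out  []string
		link = first
	)

	for {
		p, err := get(link)
		if err != nil {
			return out, fmt.Errorf("getting page %q: %w", link, err)
		}

		out = append(out, p.values...)

		if p.nextLink == "" {
			return out, nil
		}

		link = p.nextLink
	}
}

func main() {
	pages := map[string]page{
		"start": {values: []string{"a"}, nextLink: "p2"},
		"p2":    {values: []string{"b"}},
	}

	got, err := collectPages("start", func(l string) (page, error) { return pages[l], nil })
	fmt.Println(got, err)
}
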
// fetchColumns utility function to return columns from a site. // fetchColumns utility function to return columns from a site.
@ -298,15 +300,16 @@ func fetchContentTypes(
ctx context.Context, ctx context.Context,
gs graph.Servicer, gs graph.Servicer,
siteID, listID string, siteID, listID string,
errs *fault.Errors, errs *fault.Bus,
) ([]models.ContentTypeable, error) { ) ([]models.ContentTypeable, error) {
var ( var (
el = errs.Local()
cTypes = make([]models.ContentTypeable, 0) cTypes = make([]models.ContentTypeable, 0)
builder = gs.Client().SitesById(siteID).ListsById(listID).ContentTypes() builder = gs.Client().SitesById(siteID).ListsById(listID).ContentTypes()
) )
for { for {
if errs.Err() != nil { if errs.Failure() != nil {
break break
} }
@ -316,7 +319,7 @@ func fetchContentTypes(
} }
for _, cont := range resp.GetValue() { for _, cont := range resp.GetValue() {
if errs.Err() != nil { if el.Failure() != nil {
break break
} }
@ -324,7 +327,7 @@ func fetchContentTypes(
links, err := fetchColumnLinks(ctx, gs, siteID, listID, id) links, err := fetchColumnLinks(ctx, gs, siteID, listID, id)
if err != nil { if err != nil {
errs.Add(err) el.AddRecoverable(err)
continue continue
} }
@ -332,7 +335,7 @@ func fetchContentTypes(
cs, err := fetchColumns(ctx, gs, siteID, listID, id) cs, err := fetchColumns(ctx, gs, siteID, listID, id)
if err != nil { if err != nil {
errs.Add(err) el.AddRecoverable(err)
continue continue
} }
@ -348,7 +351,7 @@ func fetchContentTypes(
builder = mssite.NewItemListsItemContentTypesRequestBuilder(*resp.GetOdataNextLink(), gs.Adapter()) builder = mssite.NewItemListsItemContentTypesRequestBuilder(*resp.GetOdataNextLink(), gs.Adapter())
} }
return cTypes, errs.Err() return cTypes, el.Failure()
} }
func fetchColumnLinks( func fetchColumnLinks(


@ -46,7 +46,7 @@ func RestoreCollections(
dest control.RestoreDestination, dest control.RestoreDestination,
dcs []data.RestoreCollection, dcs []data.RestoreCollection,
deets *details.Builder, deets *details.Builder,
errs *fault.Errors, errs *fault.Bus,
) (*support.ConnectorOperationStatus, error) { ) (*support.ConnectorOperationStatus, error) {
var ( var (
err error err error
@ -56,7 +56,6 @@ func RestoreCollections(
// Iterate through the data collections and restore the contents of each // Iterate through the data collections and restore the contents of each
for _, dc := range dcs { for _, dc := range dcs {
var ( var (
canceled bool
category = dc.FullPath().Category() category = dc.FullPath().Category()
metrics support.CollectionMetrics metrics support.CollectionMetrics
ictx = clues.Add(ctx, ictx = clues.Add(ctx,
@ -67,7 +66,7 @@ func RestoreCollections(
switch dc.FullPath().Category() { switch dc.FullPath().Category() {
case path.LibrariesCategory: case path.LibrariesCategory:
metrics, _, _, canceled = onedrive.RestoreCollection( metrics, _, _, err = onedrive.RestoreCollection(
ictx, ictx,
backupVersion, backupVersion,
service, service,
@ -76,9 +75,9 @@ func RestoreCollections(
onedrive.SharePointSource, onedrive.SharePointSource,
dest.ContainerName, dest.ContainerName,
deets, deets,
-				func(s string, err error) { errs.Add(err) },
				map[string]string{},
-				false)
+				false,
+				errs)
case path.ListsCategory: case path.ListsCategory:
metrics, err = RestoreListCollection( metrics, err = RestoreListCollection(
ictx, ictx,
@ -101,7 +100,7 @@ func RestoreCollections(
restoreMetrics.Combine(metrics) restoreMetrics.Combine(metrics)
if canceled || err != nil { if err != nil {
break break
} }
} }
@ -216,7 +215,7 @@ func RestoreListCollection(
dc data.RestoreCollection, dc data.RestoreCollection,
restoreContainerName string, restoreContainerName string,
deets *details.Builder, deets *details.Builder,
errs *fault.Errors, errs *fault.Bus,
) (support.CollectionMetrics, error) { ) (support.CollectionMetrics, error) {
ctx, end := D.Span(ctx, "gc:sharepoint:restoreListCollection", D.Label("path", dc.FullPath())) ctx, end := D.Span(ctx, "gc:sharepoint:restoreListCollection", D.Label("path", dc.FullPath()))
defer end() defer end()
@ -226,13 +225,14 @@ func RestoreListCollection(
directory = dc.FullPath() directory = dc.FullPath()
siteID = directory.ResourceOwner() siteID = directory.ResourceOwner()
items = dc.Items(ctx, errs) items = dc.Items(ctx, errs)
el = errs.Local()
) )
trace.Log(ctx, "gc:sharepoint:restoreListCollection", directory.String()) trace.Log(ctx, "gc:sharepoint:restoreListCollection", directory.String())
for { for {
if errs.Err() != nil { if el.Failure() != nil {
return metrics, errs.Err() break
} }
select { select {
@ -252,7 +252,7 @@ func RestoreListCollection(
siteID, siteID,
restoreContainerName) restoreContainerName)
if err != nil { if err != nil {
errs.Add(err) el.AddRecoverable(err)
continue continue
} }
@ -260,7 +260,7 @@ func RestoreListCollection(
itemPath, err := dc.FullPath().Append(itemData.UUID(), true) itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
if err != nil { if err != nil {
errs.Add(clues.Wrap(err, "appending item to full path").WithClues(ctx)) el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ctx))
continue continue
} }
@ -275,6 +275,8 @@ func RestoreListCollection(
metrics.Successes++ metrics.Successes++
} }
} }
return metrics, el.Failure()
} }
// RestorePageCollection handles restoration of an individual site page collection. // RestorePageCollection handles restoration of an individual site page collection.
@ -287,7 +289,7 @@ func RestorePageCollection(
dc data.RestoreCollection, dc data.RestoreCollection,
restoreContainerName string, restoreContainerName string,
deets *details.Builder, deets *details.Builder,
errs *fault.Errors, errs *fault.Bus,
) (support.CollectionMetrics, error) { ) (support.CollectionMetrics, error) {
var ( var (
metrics = support.CollectionMetrics{} metrics = support.CollectionMetrics{}
@ -305,14 +307,15 @@ func RestorePageCollection(
return metrics, clues.Wrap(err, "constructing graph client") return metrics, clues.Wrap(err, "constructing graph client")
} }
-	service := discover.NewBetaService(adpt)
-
-	// Restore items from collection
-	items := dc.Items(ctx, errs)
+	var (
+		el      = errs.Local()
+		service = discover.NewBetaService(adpt)
+		items   = dc.Items(ctx, errs)
+	)
for { for {
if errs.Err() != nil { if el.Failure() != nil {
return metrics, errs.Err() break
} }
select { select {
@ -332,7 +335,7 @@ func RestorePageCollection(
siteID, siteID,
restoreContainerName) restoreContainerName)
if err != nil { if err != nil {
errs.Add(err) el.AddRecoverable(err)
continue continue
} }
@ -340,7 +343,7 @@ func RestorePageCollection(
itemPath, err := dc.FullPath().Append(itemData.UUID(), true) itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
if err != nil { if err != nil {
errs.Add(clues.Wrap(err, "appending item to full path").WithClues(ctx)) el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ctx))
continue continue
} }
@ -355,4 +358,6 @@ func RestorePageCollection(
metrics.Successes++ metrics.Successes++
} }
} }
return metrics, el.Failure()
} }


@ -33,7 +33,7 @@ type Collection interface {
// Each returned struct contains the next item in the collection // Each returned struct contains the next item in the collection
// The channel is closed when there are no more items in the collection or if // The channel is closed when there are no more items in the collection or if
// an unrecoverable error caused an early termination in the sender. // an unrecoverable error caused an early termination in the sender.
Items(ctx context.Context, errs *fault.Errors) <-chan Stream Items(ctx context.Context, errs *fault.Bus) <-chan Stream
// FullPath returns a path struct that acts as a metadata tag for this // FullPath returns a path struct that acts as a metadata tag for this
// Collection. // Collection.
FullPath() path.Path FullPath() path.Path
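
A minimal implementer of the Items contract above: a producer goroutine that closes the channel when the collection is exhausted or the context ends (the fault bus is reduced to an error callback so the sketch stays self-contained):

package main

import (
	"context"
	"fmt"
)

type stream struct{ uuid string }

type sliceCollection struct {
	items []stream
}

// Items satisfies the contract sketched above: the channel is closed
// when the collection is exhausted or an early termination occurs.
func (c *sliceCollection) Items(ctx context.Context, onErr func(error)) <-chan stream {
	out := make(chan stream)

	go func() {
		defer close(out)

		for _, it := range c.items {
			select {
			case <-ctx.Done():
				onErr(ctx.Err())
				return
			case out <- it:
			}
		}
	}()

	return out
}

func main() {
	c := &sliceCollection{items: []stream{{"a"}, {"b"}}}

	for it := range c.Items(context.Background(), func(err error) { fmt.Println("err:", err) }) {
		fmt.Println(it.uuid)
	}
}
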


@ -26,7 +26,7 @@ type kopiaDataCollection struct {
func (kdc *kopiaDataCollection) Items( func (kdc *kopiaDataCollection) Items(
ctx context.Context, ctx context.Context,
errs *fault.Errors, errs *fault.Bus,
) <-chan data.Stream { ) <-chan data.Stream {
res := make(chan data.Stream) res := make(chan data.Stream)


@ -139,7 +139,7 @@ type corsoProgress struct {
toMerge map[string]PrevRefs toMerge map[string]PrevRefs
mu sync.RWMutex mu sync.RWMutex
totalBytes int64 totalBytes int64
errs *fault.Errors errs *fault.Bus
} }
// Kopia interface function used as a callback when kopia finishes processing a // Kopia interface function used as a callback when kopia finishes processing a
@ -169,7 +169,7 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
// never had to materialize their details in-memory. // never had to materialize their details in-memory.
if d.info == nil { if d.info == nil {
if d.prevPath == nil { if d.prevPath == nil {
cp.errs.Add(clues.New("item sourced from previous backup with no previous path"). cp.errs.AddRecoverable(clues.New("item sourced from previous backup with no previous path").
With( With(
"service", d.repoPath.Service().String(), "service", d.repoPath.Service().String(),
"category", d.repoPath.Category().String(), "category", d.repoPath.Category().String(),
@ -263,7 +263,7 @@ func (cp *corsoProgress) CachedFile(fname string, size int64) {
func (cp *corsoProgress) Error(relpath string, err error, isIgnored bool) { func (cp *corsoProgress) Error(relpath string, err error, isIgnored bool) {
defer cp.UploadProgress.Error(relpath, err, isIgnored) defer cp.UploadProgress.Error(relpath, err, isIgnored)
cp.errs.Add(clues.Wrap(err, "kopia reported error"). cp.errs.AddRecoverable(clues.Wrap(err, "kopia reported error").
With("is_ignored", isIgnored, "relative_path", relpath)) With("is_ignored", isIgnored, "relative_path", relpath))
} }
@ -334,7 +334,7 @@ func collectionEntries(
itemPath, err := streamedEnts.FullPath().Append(e.UUID(), true) itemPath, err := streamedEnts.FullPath().Append(e.UUID(), true)
if err != nil { if err != nil {
err = errors.Wrap(err, "getting full item path") err = errors.Wrap(err, "getting full item path")
progress.errs.Add(err) progress.errs.AddRecoverable(err)
logger.Ctx(ctx).With("err", err).Errorw("getting full item path", clues.InErr(err).Slice()...) logger.Ctx(ctx).With("err", err).Errorw("getting full item path", clues.InErr(err).Slice()...)


@ -518,7 +518,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() {
assert.Empty(t, cp.pending) assert.Empty(t, cp.pending)
assert.Empty(t, bd.Details().Entries) assert.Empty(t, bd.Details().Entries)
assert.Error(t, cp.errs.Err()) assert.Error(t, cp.errs.Failure())
} }
func (suite *CorsoProgressUnitSuite) TestFinishedFileBuildsHierarchyNewItem() { func (suite *CorsoProgressUnitSuite) TestFinishedFileBuildsHierarchyNewItem() {


@ -134,7 +134,7 @@ func (w Wrapper) BackupCollections(
globalExcludeSet map[string]struct{}, globalExcludeSet map[string]struct{},
tags map[string]string, tags map[string]string,
buildTreeWithBase bool, buildTreeWithBase bool,
errs *fault.Errors, errs *fault.Bus,
) (*BackupStats, *details.Builder, map[string]PrevRefs, error) { ) (*BackupStats, *details.Builder, map[string]PrevRefs, error) {
if w.c == nil { if w.c == nil {
return nil, nil, nil, clues.Stack(errNotConnected).WithClues(ctx) return nil, nil, nil, clues.Stack(errNotConnected).WithClues(ctx)
@ -184,7 +184,7 @@ func (w Wrapper) BackupCollections(
return nil, nil, nil, err return nil, nil, nil, err
} }
return s, progress.deets, progress.toMerge, progress.errs.Err() return s, progress.deets, progress.toMerge, progress.errs.Failure()
} }
func (w Wrapper) makeSnapshotWithRoot( func (w Wrapper) makeSnapshotWithRoot(
@ -383,7 +383,7 @@ func (w Wrapper) RestoreMultipleItems(
snapshotID string, snapshotID string,
paths []path.Path, paths []path.Path,
bcounter ByteCounter, bcounter ByteCounter,
errs *fault.Errors, errs *fault.Bus,
) ([]data.RestoreCollection, error) { ) ([]data.RestoreCollection, error) {
ctx, end := D.Span(ctx, "kopia:restoreMultipleItems") ctx, end := D.Span(ctx, "kopia:restoreMultipleItems")
defer end() defer end()
@ -397,23 +397,26 @@ func (w Wrapper) RestoreMultipleItems(
return nil, err return nil, err
} }
-	// Maps short ID of parent path to data collection for that folder.
-	cols := map[string]*kopiaDataCollection{}
+	var (
+		// Maps short ID of parent path to data collection for that folder.
+		cols = map[string]*kopiaDataCollection{}
+		el   = errs.Local()
+	)
for _, itemPath := range paths { for _, itemPath := range paths {
if errs.Err() != nil { if el.Failure() != nil {
return nil, errs.Err() return nil, el.Failure()
} }
ds, err := getItemStream(ctx, itemPath, snapshotRoot, bcounter) ds, err := getItemStream(ctx, itemPath, snapshotRoot, bcounter)
if err != nil { if err != nil {
errs.Add(err) el.AddRecoverable(err)
continue continue
} }
parentPath, err := itemPath.Dir() parentPath, err := itemPath.Dir()
if err != nil { if err != nil {
errs.Add(clues.Wrap(err, "making directory collection").WithClues(ctx)) el.AddRecoverable(clues.Wrap(err, "making directory collection").WithClues(ctx))
continue continue
} }
@ -437,7 +440,7 @@ func (w Wrapper) RestoreMultipleItems(
res = append(res, c) res = append(res, c)
} }
return res, errs.Err() return res, el.Failure()
} }
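
RestoreMultipleItems buckets each restored stream into a collection keyed by its parent directory, so each folder materializes exactly once. A sketch of that grouping, keyed by the parent path string rather than the repo's short ID:

package main

import (
	"fmt"
	"path"
)

type item struct{ fullPath string }

// groupByParent collects items under their parent directory, the way
// RestoreMultipleItems maps parent paths to kopiaDataCollections.
func groupByParent(items []item) map[string][]item {
	cols := map[string][]item{}

	for _, it := range items {
		parent := path.Dir(it.fullPath)
		cols[parent] = append(cols[parent], it)
	}

	return cols
}

func main() {
	got := groupByParent([]item{
		{"tenant/user/inbox/a"},
		{"tenant/user/inbox/b"},
		{"tenant/user/sent/c"},
	})
	fmt.Println(len(got["tenant/user/inbox"]), len(got["tenant/user/sent"])) // 2 1
}
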
// DeleteSnapshot removes the provided manifest from kopia. // DeleteSnapshot removes the provided manifest from kopia.


@ -394,7 +394,7 @@ type mockBackupCollection struct {
streams []data.Stream streams []data.Stream
} }
func (c *mockBackupCollection) Items(context.Context, *fault.Errors) <-chan data.Stream { func (c *mockBackupCollection) Items(context.Context, *fault.Bus) <-chan data.Stream {
res := make(chan data.Stream) res := make(chan data.Stream)
go func() { go func() {


@ -101,7 +101,7 @@ type backupStats struct {
} }
type detailsWriter interface { type detailsWriter interface {
WriteBackupDetails(context.Context, *details.Details, *fault.Errors) (string, error) WriteBackupDetails(context.Context, *details.Details, *fault.Bus) (string, error)
} }
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
@ -181,7 +181,7 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
With("err", err). With("err", err).
Errorw("doing backup", clues.InErr(err).Slice()...) Errorw("doing backup", clues.InErr(err).Slice()...)
op.Errors.Fail(errors.Wrap(err, "doing backup")) op.Errors.Fail(errors.Wrap(err, "doing backup"))
opStats.readErr = op.Errors.Err() opStats.readErr = op.Errors.Failure()
} }
// ----- // -----
@ -191,9 +191,9 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
err = op.persistResults(startTime, &opStats) err = op.persistResults(startTime, &opStats)
if err != nil { if err != nil {
op.Errors.Fail(errors.Wrap(err, "persisting backup results")) op.Errors.Fail(errors.Wrap(err, "persisting backup results"))
opStats.writeErr = op.Errors.Err() opStats.writeErr = op.Errors.Failure()
return op.Errors.Err() return op.Errors.Failure()
} }
err = op.createBackupModels( err = op.createBackupModels(
@ -204,9 +204,9 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
deets.Details()) deets.Details())
if err != nil { if err != nil {
op.Errors.Fail(errors.Wrap(err, "persisting backup")) op.Errors.Fail(errors.Wrap(err, "persisting backup"))
opStats.writeErr = op.Errors.Err() opStats.writeErr = op.Errors.Failure()
return op.Errors.Err() return op.Errors.Failure()
} }
logger.Ctx(ctx).Infow("completed backup", "results", op.Results) logger.Ctx(ctx).Infow("completed backup", "results", op.Results)
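
Run now treats every late-stage error uniformly: wrap it, promote it through Fail, copy Failure() into the op stats, and return Failure(). Compressed to its control flow (bus and stats are simplified stand-ins for the operation's fields):

package main

import (
	"errors"
	"fmt"
)

type bus struct{ failure error }

func (b *bus) Fail(err error) {
	if b.failure == nil {
		b.failure = err
	}
}

func (b *bus) Failure() error { return b.failure }

type stats struct{ writeErr error }

// run mirrors the shape of BackupOperation.Run: any persistence error
// becomes the operation's non-recoverable failure and is returned.
func run(persist func() error) error {
	var (
		opErrors bus
		opStats  stats
	)

	if err := persist(); err != nil {
		opErrors.Fail(fmt.Errorf("persisting backup results: %w", err))
		opStats.writeErr = opErrors.Failure()

		return opErrors.Failure()
	}

	return opErrors.Failure() // nil on success
}

func main() {
	fmt.Println(run(func() error { return errors.New("disk full") }))
	fmt.Println(run(func() error { return nil }))
}
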
@ -313,7 +313,7 @@ func produceBackupDataCollections(
sel selectors.Selector, sel selectors.Selector,
metadata []data.RestoreCollection, metadata []data.RestoreCollection,
ctrlOpts control.Options, ctrlOpts control.Options,
errs *fault.Errors, errs *fault.Bus,
) ([]data.BackupCollection, map[string]struct{}, error) { ) ([]data.BackupCollection, map[string]struct{}, error) {
complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Discovering items to backup")) complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Discovering items to backup"))
defer func() { defer func() {
@ -337,7 +337,7 @@ type backuper interface {
excluded map[string]struct{}, excluded map[string]struct{},
tags map[string]string, tags map[string]string,
buildTreeWithBase bool, buildTreeWithBase bool,
errs *fault.Errors, errs *fault.Bus,
) (*kopia.BackupStats, *details.Builder, map[string]kopia.PrevRefs, error) ) (*kopia.BackupStats, *details.Builder, map[string]kopia.PrevRefs, error)
} }
@ -396,7 +396,7 @@ func consumeBackupDataCollections(
excludes map[string]struct{}, excludes map[string]struct{},
backupID model.StableID, backupID model.StableID,
isIncremental bool, isIncremental bool,
errs *fault.Errors, errs *fault.Bus,
) (*kopia.BackupStats, *details.Builder, map[string]kopia.PrevRefs, error) { ) (*kopia.BackupStats, *details.Builder, map[string]kopia.PrevRefs, error) {
complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Backing up data")) complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Backing up data"))
defer func() { defer func() {
@ -505,7 +505,7 @@ func mergeDetails(
mans []*kopia.ManifestEntry, mans []*kopia.ManifestEntry,
shortRefsFromPrevBackup map[string]kopia.PrevRefs, shortRefsFromPrevBackup map[string]kopia.PrevRefs,
deets *details.Builder, deets *details.Builder,
errs *fault.Errors, errs *fault.Bus,
) error { ) error {
// Don't bother loading any of the base details if there's nothing we need to // Don't bother loading any of the base details if there's nothing we need to
// merge. // merge.


@ -153,8 +153,8 @@ func runAndCheckBackup(
assert.Less(t, int64(0), bo.Results.BytesRead, "bytes read") assert.Less(t, int64(0), bo.Results.BytesRead, "bytes read")
assert.Less(t, int64(0), bo.Results.BytesUploaded, "bytes uploaded") assert.Less(t, int64(0), bo.Results.BytesUploaded, "bytes uploaded")
assert.Equal(t, 1, bo.Results.ResourceOwners, "count of resource owners") assert.Equal(t, 1, bo.Results.ResourceOwners, "count of resource owners")
assert.NoError(t, bo.Errors.Err(), "incremental non-recoverable error") assert.NoError(t, bo.Errors.Failure(), "incremental non-recoverable error")
assert.Empty(t, bo.Errors.Errs(), "incremental recoverable/iteration errors") assert.Empty(t, bo.Errors.Recovered(), "incremental recoverable/iteration errors")
assert.NoError(t, bo.Results.ReadErrors, "errors reading data") assert.NoError(t, bo.Results.ReadErrors, "errors reading data")
assert.NoError(t, bo.Results.WriteErrors, "errors writing data") assert.NoError(t, bo.Results.WriteErrors, "errors writing data")
assert.Equal(t, 1, mb.TimesCalled[events.BackupStart], "backup-start events") assert.Equal(t, 1, mb.TimesCalled[events.BackupStart], "backup-start events")
@ -626,8 +626,8 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() {
assert.Greater(t, bo.Results.BytesRead, incBO.Results.BytesRead, "incremental bytes read") assert.Greater(t, bo.Results.BytesRead, incBO.Results.BytesRead, "incremental bytes read")
assert.Greater(t, bo.Results.BytesUploaded, incBO.Results.BytesUploaded, "incremental bytes uploaded") assert.Greater(t, bo.Results.BytesUploaded, incBO.Results.BytesUploaded, "incremental bytes uploaded")
assert.Equal(t, bo.Results.ResourceOwners, incBO.Results.ResourceOwners, "incremental backup resource owner") assert.Equal(t, bo.Results.ResourceOwners, incBO.Results.ResourceOwners, "incremental backup resource owner")
assert.NoError(t, incBO.Errors.Err(), "incremental non-recoverable error") assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error")
assert.Empty(t, incBO.Errors.Errs(), "count incremental recoverable/iteration errors") assert.Empty(t, incBO.Errors.Recovered(), "count incremental recoverable/iteration errors")
assert.NoError(t, incBO.Results.ReadErrors, "incremental read errors") assert.NoError(t, incBO.Results.ReadErrors, "incremental read errors")
assert.NoError(t, incBO.Results.WriteErrors, "incremental write errors") assert.NoError(t, incBO.Results.WriteErrors, "incremental write errors")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events") assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
@ -1056,8 +1056,8 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
// +4 on read/writes to account for metadata: 1 delta and 1 path for each type. // +4 on read/writes to account for metadata: 1 delta and 1 path for each type.
assert.Equal(t, test.itemsWritten+4, incBO.Results.ItemsWritten, "incremental items written") assert.Equal(t, test.itemsWritten+4, incBO.Results.ItemsWritten, "incremental items written")
assert.Equal(t, test.itemsRead+4, incBO.Results.ItemsRead, "incremental items read") assert.Equal(t, test.itemsRead+4, incBO.Results.ItemsRead, "incremental items read")
assert.NoError(t, incBO.Errors.Err(), "incremental non-recoverable error") assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error")
assert.Empty(t, incBO.Errors.Errs(), "incremental recoverable/iteration errors") assert.Empty(t, incBO.Errors.Recovered(), "incremental recoverable/iteration errors")
assert.NoError(t, incBO.Results.ReadErrors, "incremental read errors") assert.NoError(t, incBO.Results.ReadErrors, "incremental read errors")
assert.NoError(t, incBO.Results.WriteErrors, "incremental write errors") assert.NoError(t, incBO.Results.WriteErrors, "incremental write errors")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events") assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
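
The updated assertions separate the two failure channels: Failure() for the single non-recoverable error and Recovered() for accumulated per-item errors. A sketch of that distinction as a unit test over a stand-in bus (testify is the assertion library these tests already use; the bus below is a simplification, not the real fault package):

package sketch

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

// bus is a simplified stand-in for fault.Bus, as in the earlier sketches.
type bus struct {
	failure   error
	recovered []error
}

func (b *bus) Fail(err error)           { b.failure = err }
func (b *bus) AddRecoverable(err error) { b.recovered = append(b.recovered, err) }
func (b *bus) Failure() error           { return b.failure }
func (b *bus) Recovered() []error       { return b.recovered }

func TestBusGetters(t *testing.T) {
	b := &bus{}
	assert.NoError(t, b.Failure(), "non-recoverable error")
	assert.Empty(t, b.Recovered(), "recoverable/iteration errors")

	b.AddRecoverable(errors.New("one item failed"))
	assert.NoError(t, b.Failure(), "recoverable errors do not fail the run")
	assert.Len(t, b.Recovered(), 1, "recoverable error is retained")
}
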


@ -63,7 +63,7 @@ func (mr *mockRestorer) RestoreMultipleItems(
snapshotID string, snapshotID string,
paths []path.Path, paths []path.Path,
bc kopia.ByteCounter, bc kopia.ByteCounter,
errs *fault.Errors, errs *fault.Bus,
) ([]data.RestoreCollection, error) { ) ([]data.RestoreCollection, error) {
mr.gotPaths = append(mr.gotPaths, paths...) mr.gotPaths = append(mr.gotPaths, paths...)
@ -99,7 +99,7 @@ func (mbu mockBackuper) BackupCollections(
excluded map[string]struct{}, excluded map[string]struct{},
tags map[string]string, tags map[string]string,
buildTreeWithBase bool, buildTreeWithBase bool,
errs *fault.Errors, errs *fault.Bus,
) (*kopia.BackupStats, *details.Builder, map[string]kopia.PrevRefs, error) { ) (*kopia.BackupStats, *details.Builder, map[string]kopia.PrevRefs, error) {
if mbu.checkFunc != nil { if mbu.checkFunc != nil {
mbu.checkFunc(bases, cs, tags, buildTreeWithBase) mbu.checkFunc(bases, cs, tags, buildTreeWithBase)
@ -117,7 +117,7 @@ type mockDetailsReader struct {
func (mdr mockDetailsReader) ReadBackupDetails( func (mdr mockDetailsReader) ReadBackupDetails(
ctx context.Context, ctx context.Context,
detailsID string, detailsID string,
errs *fault.Errors, errs *fault.Bus,
) (*details.Details, error) { ) (*details.Details, error) {
r := mdr.entries[detailsID] r := mdr.entries[detailsID]


@ -13,7 +13,7 @@ import (
) )
type detailsReader interface { type detailsReader interface {
ReadBackupDetails(ctx context.Context, detailsID string, errs *fault.Errors) (*details.Details, error) ReadBackupDetails(ctx context.Context, detailsID string, errs *fault.Bus) (*details.Details, error)
} }
func getBackupAndDetailsFromID( func getBackupAndDetailsFromID(
@ -21,7 +21,7 @@ func getBackupAndDetailsFromID(
backupID model.StableID, backupID model.StableID,
ms *store.Wrapper, ms *store.Wrapper,
detailsStore detailsReader, detailsStore detailsReader,
errs *fault.Errors, errs *fault.Bus,
) (*backup.Backup, *details.Details, error) { ) (*backup.Backup, *details.Details, error) {
dID, bup, err := ms.GetDetailsIDFromBackupID(ctx, backupID) dID, bup, err := ms.GetDetailsIDFromBackupID(ctx, backupID)
if err != nil { if err != nil {


@ -45,7 +45,7 @@ func produceManifestsAndMetadata(
reasons []kopia.Reason, reasons []kopia.Reason,
tenantID string, tenantID string,
getMetadata bool, getMetadata bool,
errs *fault.Errors, errs *fault.Bus,
) ([]*kopia.ManifestEntry, []data.RestoreCollection, bool, error) { ) ([]*kopia.ManifestEntry, []data.RestoreCollection, bool, error) {
var ( var (
metadataFiles = graph.AllMetadataFileNames() metadataFiles = graph.AllMetadataFileNames()
@ -135,14 +135,15 @@ func produceManifestsAndMetadata(
// of manifests, that each manifest's Reason (owner, service, category) is only // of manifests, that each manifest's Reason (owner, service, category) is only
// included once. If a reason is duplicated by any two manifests, an error is // included once. If a reason is duplicated by any two manifests, an error is
// returned. // returned.
func verifyDistinctBases(ctx context.Context, mans []*kopia.ManifestEntry, errs *fault.Errors) error { func verifyDistinctBases(ctx context.Context, mans []*kopia.ManifestEntry, errs *fault.Bus) error {
var ( var (
failed bool failed bool
reasons = map[string]manifest.ID{} reasons = map[string]manifest.ID{}
el = errs.Local()
) )
for _, man := range mans { for _, man := range mans {
if errs.Err() != nil { if el.Failure() != nil {
break break
} }
@ -162,7 +163,7 @@ func verifyDistinctBases(ctx context.Context, mans []*kopia.ManifestEntry, errs
if b, ok := reasons[reasonKey]; ok { if b, ok := reasons[reasonKey]; ok {
failed = true failed = true
errs.Add(clues.New("manifests have overlapping reasons"). el.AddRecoverable(clues.New("manifests have overlapping reasons").
WithClues(ctx). WithClues(ctx).
With("other_manifest_id", b)) With("other_manifest_id", b))
@ -177,7 +178,7 @@ func verifyDistinctBases(ctx context.Context, mans []*kopia.ManifestEntry, errs
return clues.New("multiple base snapshots qualify").WithClues(ctx) return clues.New("multiple base snapshots qualify").WithClues(ctx)
} }
return errs.Err() return el.Failure()
} }
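
verifyDistinctBases rejects manifest sets in which two manifests share a (resource owner, service, category) reason, keying a map by the flattened reason. The uniqueness check in isolation:

package main

import "fmt"

type manifest struct {
	id      string
	reasons []string // flattened "owner/service/category" keys
}

// verifyDistinct returns an error if any reason appears in more than
// one manifest, the condition verifyDistinctBases guards against.
func verifyDistinct(mans []manifest) error {
	seen := map[string]string{} // reason -> manifest id

	for _, m := range mans {
		for _, r := range m.reasons {
			if other, ok := seen[r]; ok {
				return fmt.Errorf("manifests %s and %s have overlapping reason %q", other, m.id, r)
			}

			seen[r] = m.id
		}
	}

	return nil
}

func main() {
	fmt.Println(verifyDistinct([]manifest{
		{id: "m1", reasons: []string{"u1/exchange/email"}},
		{id: "m2", reasons: []string{"u1/exchange/email"}},
	}))
}
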
// collectMetadata retrieves all metadata files associated with the manifest. // collectMetadata retrieves all metadata files associated with the manifest.
@ -187,7 +188,7 @@ func collectMetadata(
man *kopia.ManifestEntry, man *kopia.ManifestEntry,
fileNames []string, fileNames []string,
tenantID string, tenantID string,
errs *fault.Errors, errs *fault.Bus,
) ([]data.RestoreCollection, error) { ) ([]data.RestoreCollection, error) {
paths := []path.Path{} paths := []path.Path{}


@ -53,7 +53,7 @@ type mockColl struct {
p path.Path p path.Path
} }
func (mc mockColl) Items(context.Context, *fault.Errors) <-chan data.Stream { func (mc mockColl) Items(context.Context, *fault.Bus) <-chan data.Stream {
return nil return nil
} }


@ -55,7 +55,7 @@ const (
type operation struct { type operation struct {
CreatedAt time.Time `json:"createdAt"` CreatedAt time.Time `json:"createdAt"`
Errors *fault.Errors `json:"errors"` Errors *fault.Bus `json:"errors"`
Options control.Options `json:"options"` Options control.Options `json:"options"`
Status opStatus `json:"status"` Status opStatus `json:"status"`
@ -100,7 +100,7 @@ func connectToM365(
ctx context.Context, ctx context.Context,
sel selectors.Selector, sel selectors.Selector,
acct account.Account, acct account.Account,
errs *fault.Errors, errs *fault.Bus,
) (*connector.GraphConnector, error) { ) (*connector.GraphConnector, error) {
complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Connecting to M365")) complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Connecting to M365"))
defer func() { defer func() {


@ -104,7 +104,7 @@ type restorer interface {
snapshotID string, snapshotID string,
paths []path.Path, paths []path.Path,
bc kopia.ByteCounter, bc kopia.ByteCounter,
errs *fault.Errors, errs *fault.Bus,
) ([]data.RestoreCollection, error) ) ([]data.RestoreCollection, error)
} }
@ -167,7 +167,7 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
With("err", err). With("err", err).
Errorw("doing restore", clues.InErr(err).Slice()...) Errorw("doing restore", clues.InErr(err).Slice()...)
op.Errors.Fail(errors.Wrap(err, "doing restore")) op.Errors.Fail(errors.Wrap(err, "doing restore"))
opStats.readErr = op.Errors.Err() opStats.readErr = op.Errors.Failure()
} }
// ----- // -----
@ -177,9 +177,9 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
err = op.persistResults(ctx, start, &opStats) err = op.persistResults(ctx, start, &opStats)
if err != nil { if err != nil {
op.Errors.Fail(errors.Wrap(err, "persisting restore results")) op.Errors.Fail(errors.Wrap(err, "persisting restore results"))
opStats.writeErr = op.Errors.Err() opStats.writeErr = op.Errors.Failure()
return nil, op.Errors.Err() return nil, op.Errors.Failure()
} }
logger.Ctx(ctx).Infow("completed restore", "results", op.Results) logger.Ctx(ctx).Infow("completed restore", "results", op.Results)
@ -344,7 +344,7 @@ func formatDetailsForRestoration(
ctx context.Context, ctx context.Context,
sel selectors.Selector, sel selectors.Selector,
deets *details.Details, deets *details.Details,
errs *fault.Errors, errs *fault.Bus,
) ([]path.Path, error) { ) ([]path.Path, error) {
fds, err := sel.Reduce(ctx, deets, errs) fds, err := sel.Reduce(ctx, deets, errs)
if err != nil { if err != nil {
@ -354,16 +354,17 @@ func formatDetailsForRestoration(
var ( var (
fdsPaths = fds.Paths() fdsPaths = fds.Paths()
paths = make([]path.Path, len(fdsPaths)) paths = make([]path.Path, len(fdsPaths))
el = errs.Local()
) )
for i := range fdsPaths { for i := range fdsPaths {
if errs.Err() != nil { if el.Failure() != nil {
return nil, errs.Err() break
} }
p, err := path.FromDataLayerPath(fdsPaths[i], true) p, err := path.FromDataLayerPath(fdsPaths[i], true)
if err != nil { if err != nil {
errs.Add(clues. el.AddRecoverable(clues.
Wrap(err, "parsing details path after reduction"). Wrap(err, "parsing details path after reduction").
WithMap(clues.In(ctx)). WithMap(clues.In(ctx)).
With("path", fdsPaths[i])) With("path", fdsPaths[i]))
@ -385,5 +386,5 @@ func formatDetailsForRestoration(
return paths[i].String() < paths[j].String() return paths[i].String() < paths[j].String()
}) })
return paths, errs.Err() return paths, el.Failure()
} }
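
The hunk above establishes the local-bus loop shape this PR adopts throughout: spawn a local bus, break once its Failure() is set, aggregate per-item problems with AddRecoverable(), and return the local failure. A minimal, self-contained sketch of that shape (the item list, process func, and import path are illustrative assumptions, not code from this PR):

package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/fault"
)

// processAll mirrors the loop above: local bus, early break on
// failure, per-item AddRecoverable, and a local-failure return.
func processAll(items []string, errs *fault.Bus) error {
	el := errs.Local()

	for _, item := range items {
		if el.Failure() != nil {
			break
		}

		if err := process(item); err != nil {
			el.AddRecoverable(err)
		}
	}

	return el.Failure()
}

func process(item string) error { return nil }

func main() {
	errs := fault.New(false)
	if err := processAll([]string{"a", "b"}, errs); err != nil {
		fmt.Println("hard failure:", err)
	}

	for _, err := range errs.Recovered() {
		fmt.Println("recovered:", err)
	}
}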

View File

@ -290,7 +290,6 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run() {
ds, err := ro.Run(ctx) ds, err := ro.Run(ctx)
require.NoError(t, err, "restoreOp.Run()") require.NoError(t, err, "restoreOp.Run()")
require.Empty(t, ro.Errors.Errs(), "restoreOp.Run() recoverable errors")
require.NotEmpty(t, ro.Results, "restoreOp results") require.NotEmpty(t, ro.Results, "restoreOp results")
require.NotNil(t, ds, "restored details") require.NotNil(t, ds, "restored details")
assert.Equal(t, ro.Status, Completed, "restoreOp status") assert.Equal(t, ro.Status, Completed, "restoreOp status")
@ -299,8 +298,8 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run() {
assert.Less(t, 0, ro.Results.ItemsWritten, "restored items written") assert.Less(t, 0, ro.Results.ItemsWritten, "restored items written")
assert.Less(t, int64(0), ro.Results.BytesRead, "bytes read") assert.Less(t, int64(0), ro.Results.BytesRead, "bytes read")
assert.Equal(t, 1, ro.Results.ResourceOwners, "resource Owners") assert.Equal(t, 1, ro.Results.ResourceOwners, "resource Owners")
assert.NoError(t, ro.Errors.Err(), "non-recoverable error") assert.NoError(t, ro.Errors.Failure(), "non-recoverable error")
assert.Empty(t, ro.Errors.Errs(), "recoverable errors") assert.Empty(t, ro.Errors.Recovered(), "recoverable errors")
assert.NoError(t, ro.Results.ReadErrors, "errors while reading restore data") assert.NoError(t, ro.Results.ReadErrors, "errors while reading restore data")
assert.NoError(t, ro.Results.WriteErrors, "errors while writing restore data") assert.NoError(t, ro.Results.WriteErrors, "errors while writing restore data")
assert.Equal(t, suite.numItems, ro.Results.ItemsWritten, "backup and restore wrote the same num of items") assert.Equal(t, suite.numItems, ro.Results.ItemsWritten, "backup and restore wrote the same num of items")

View File

@ -47,7 +47,7 @@ const (
func (ss *streamStore) WriteBackupDetails( func (ss *streamStore) WriteBackupDetails(
ctx context.Context, ctx context.Context,
backupDetails *details.Details, backupDetails *details.Details,
errs *fault.Errors, errs *fault.Bus,
) (string, error) { ) (string, error) {
// construct the path of the container for the `details` item // construct the path of the container for the `details` item
p, err := path.Builder{}. p, err := path.Builder{}.
@ -95,7 +95,7 @@ func (ss *streamStore) WriteBackupDetails(
func (ss *streamStore) ReadBackupDetails( func (ss *streamStore) ReadBackupDetails(
ctx context.Context, ctx context.Context,
detailsID string, detailsID string,
errs *fault.Errors, errs *fault.Bus,
) (*details.Details, error) { ) (*details.Details, error) {
// construct the path for the `details` item // construct the path for the `details` item
detailsPath, err := path.Builder{}. detailsPath, err := path.Builder{}.
@ -195,7 +195,7 @@ func (dc *streamCollection) DoNotMergeItems() bool {
// Items() always returns a channel with a single data.Stream // Items() always returns a channel with a single data.Stream
// representing the object to be persisted // representing the object to be persisted
func (dc *streamCollection) Items(context.Context, *fault.Errors) <-chan data.Stream { func (dc *streamCollection) Items(context.Context, *fault.Bus) <-chan data.Stream {
items := make(chan data.Stream, 1) items := make(chan data.Stream, 1)
defer close(items) defer close(items)
items <- dc.item items <- dc.item
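
Because streamCollection.Items always emits exactly one data.Stream and then closes the channel, a consumer can use a single receive instead of a full range loop. A sketch of that consumption under the new *fault.Bus signature (readSingle and deserialize are hypothetical names, not part of this PR):

// readSingle drains the one item a streamCollection emits.
// deserialize stands in for the real consumption logic.
func readSingle(ctx context.Context, coll data.Collection, errs *fault.Bus) error {
	item, ok := <-coll.Items(ctx, errs)
	if !ok {
		return errors.New("collection yielded no items")
	}

	return deserialize(item)
}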

View File

@ -38,7 +38,7 @@ type Backup struct {
Version int `json:"version"` Version int `json:"version"`
// Errors contains all errors aggregated during a backup operation. // Errors contains all errors aggregated during a backup operation.
Errors fault.ErrorsData `json:"errors"` Errors fault.Errors `json:"errors"`
// stats are embedded so that the values appear as top-level properties // stats are embedded so that the values appear as top-level properties
stats.Errs // Deprecated, replaced with Errors. stats.Errs // Deprecated, replaced with Errors.
@ -55,7 +55,7 @@ func New(
selector selectors.Selector, selector selectors.Selector,
rw stats.ReadWrites, rw stats.ReadWrites,
se stats.StartAndEndTime, se stats.StartAndEndTime,
errs *fault.Errors, errs *fault.Bus,
) *Backup { ) *Backup {
return &Backup{ return &Backup{
BaseModel: model.BaseModel{ BaseModel: model.BaseModel{
@ -69,7 +69,7 @@ func New(
DetailsID: detailsID, DetailsID: detailsID,
Status: status, Status: status,
Selector: selector, Selector: selector,
Errors: errs.Data(), Errors: errs.Errors(),
ReadWrites: rw, ReadWrites: rw,
StartAndEndTime: se, StartAndEndTime: se,
Version: Version, Version: Version,
@ -156,12 +156,12 @@ func (b Backup) errorCount() int {
} }
// future tracking // future tracking
if b.Errors.Err != nil || len(b.Errors.Errs) > 0 { if b.Errors.Failure != nil || len(b.Errors.Recovered) > 0 {
if b.Errors.Err != nil { if b.Errors.Failure != nil {
errCount++ errCount++
} }
errCount += len(b.Errors.Errs) errCount += len(b.Errors.Recovered)
} }
return errCount return errCount

View File

@ -41,8 +41,8 @@ func stubBackup(t time.Time) backup.Backup {
DetailsID: "details", DetailsID: "details",
Status: "status", Status: "status",
Selector: sel.Selector, Selector: sel.Selector,
Errors: fault.ErrorsData{ Errors: fault.Errors{
Errs: []error{errors.New("read"), errors.New("write")}, Recovered: []error{errors.New("read"), errors.New("write")},
}, },
Errs: stats.Errs{ Errs: stats.Errs{
ReadErrors: errors.New("1"), ReadErrors: errors.New("1"),

View File

@ -21,18 +21,18 @@ type mockController struct {
errors any errors any
} }
func connectClient() error { return nil } func connectClient() error { return nil }
func dependencyCall() error { return nil } func dependencyCall() error { return nil }
func getIthItem(i int) error { return nil } func getIthItem(i int) error { return nil }
func getData() ([]string, error) { return nil, nil } func getData() ([]string, error) { return nil, nil }
func storeData([]string, *fault.Errors) {} func storeData([]string, *fault.Bus) {}
type mockOper struct { type mockOper struct {
Errors *fault.Errors Errors *fault.Bus
} }
func newOperation() mockOper { return mockOper{fault.New(true)} } func newOperation() mockOper { return mockOper{fault.New(true)} }
func (m mockOper) Run() *fault.Errors { return m.Errors } func (m mockOper) Run() *fault.Bus { return m.Errors }
type mockDepenedency struct{} type mockDepenedency struct{}
@ -47,44 +47,40 @@ var dependency = mockDepenedency{}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// ExampleNew highlights assumptions and best practices // ExampleNew highlights assumptions and best practices
// for generating fault.Errors structs. // for generating fault.Bus structs.
func ExampleNew() { func ExampleNew() {
// fault.Errors should only be generated during the construction of // New fault.Bus instances should only get generated during initialization,
// another controller, such as a new Backup or Restore Operations. // such as when starting up a new Backup or Restore Operation.
// Configurations like failFast are set during construction. // Configuration (eg: failFast) is set during construction and cannot
// // be updated.
// Generating new fault.Errors structs outside of an operation
// controller is a smell, and should be avoided. If you need
// to aggregate errors, you should accept an interface and pass
// an fault.Errors instance into it.
ctrl = mockController{ ctrl = mockController{
errors: fault.New(false), errors: fault.New(false),
} }
} }
// ExampleErrors_Fail describes the assumptions and best practices // ExampleBus_Fail describes the assumptions and best practices
// for setting the Failure error. // for setting the Failure error.
func ExampleErrors_Fail() { func ExampleBus_Fail() {
errs := fault.New(false) errs := fault.New(false)
// Fail() is used to record non-recoverable errors. // Fail() is used to record non-recoverable errors.
// //
// Fail() should only get called in the last step before returning // Fail() should only get called in the last step before returning
// a fault.Errors from a controller. In all other cases, you // a fault.Bus from a controller. In all other cases, you
// should simply return an error and expect the upstream controller // can stick to standard golang error handling and expect some upstream
// to call Fail() for you. // controller to call Fail() for you (if necessary).
topLevelHandler := func(errs *fault.Errors) *fault.Errors { topLevelHandler := func(errs *fault.Bus) *fault.Bus {
if err := connectClient(); err != nil { if err := connectClient(); err != nil {
return errs.Fail(err) return errs.Fail(err)
} }
return errs return errs
} }
if errs := topLevelHandler(errs); errs.Err() != nil { if errs := topLevelHandler(errs); errs.Failure() != nil {
fmt.Println(errs.Err()) fmt.Println(errs.Failure())
} }
// Only the topmost func in the stack should set the Fail() err. // Only the topmost func in the stack should set the failure.
// IE: Fail() is not Wrap(). In lower levels, errors should get // IE: Fail() is not Wrap(). In lower levels, errors should get
// wrapped and returned like normal, and only handled by fault // wrapped and returned like normal, and only handled by fault
// at the end. // at the end.
@ -102,22 +98,30 @@ func ExampleErrors_Fail() {
} }
} }
// ExampleErrors_Add describes the assumptions and best practices // ExampleBus_AddRecoverable describes the assumptions and best practices
// for aggregating iterable or recoverable errors. // for aggregating iterable or recoverable errors.
func ExampleErrors_Add() { func ExampleBus_AddRecoverable() {
errs := fault.New(false) errs := fault.New(false)
// Add() is used to record any recoverable error. // AddRecoverable() is used to record any recoverable error.
// //
// Add() should only get called as the last error handling step // What counts as a recoverable error? That's up to the given
// within a loop or stream. In all other cases, you can return // implementation. Normally, it's an inability to process one
// an error like normal and expect the upstream point of iteration // of many items within an iteration (ex: couldn't download 1 of
// to call Add() for you. // 1000 emails). But just because an error occurred during a loop
// doesn't mean it's recoverable, ex: a failure to retrieve the next
// page when accumulating a batch of resources isn't usually
// recoverable. The choice is always up to the function at hand.
//
// AddRecoverable() should only get called as the top-most location
// of error handling within the recoverable process. Child functions
// should stick to normal golang error handling and expect the upstream
// controller to call AddRecoverable() for you.
for i := range items { for i := range items {
clientBasedGetter := func(i int) error { clientBasedGetter := func(i int) error {
if err := getIthItem(i); err != nil { if err := getIthItem(i); err != nil {
// lower level calls don't Add to the fault.Errors. // lower level calls don't AddRecoverable to the fault.Bus.
// they handl errors like normal. // they stick to normal golang error handling.
return errors.Wrap(err, "dependency") return errors.Wrap(err, "dependency")
} }
@ -126,121 +130,158 @@ func ExampleErrors_Add() {
if err := clientBasedGetter(i); err != nil { if err := clientBasedGetter(i); err != nil {
// Here at the top of the loop is the correct place // Here at the top of the loop is the correct place
// to Add an error using fault. // to aggregate the error using fault.
errs.Add(err) // Side note: technically, you should use a local bus
// here (see below) instead of errs.
errs.AddRecoverable(err)
} }
} }
// Iteration should exit anytime the primary error in fault is // Iteration should exit anytime the fault failure is non-nil.
// non-nil. fault.Errors does not expose the failFast flag // fault.Bus does not expose the failFast flag directly. Instead,
// directly. Instead, errors from Add() will automatically // when failFast is true, errors from AddRecoverable() automatically
// promote to the Err() value. Therefore, loops only ned to // promote to the Failure() spot. Recoverable handling only needs to
// check the errs.Err(). If it is non-nil, then the loop should break. // check the errs.Failure(). If it is non-nil, then the loop should break.
for i := range items { for i := range items {
if errs.Err() != nil { if errs.Failure() != nil {
// if failFast == true errs.Add() was called, // if failFast == true and errs.AddRecoverable() was called,
// we'll catch the error here. // we'll catch the error here.
break break
} }
if err := getIthItem(i); err != nil { if err := getIthItem(i); err != nil {
errs.Add(err) errs.AddRecoverable(err)
} }
} }
} }
// ExampleErrors_Err describes retrieving the non-recoverable error. // ExampleBus_Failure describes retrieving the non-recoverable error.
func ExampleErrors_Err() { func ExampleBus_Failure() {
errs := fault.New(false) errs := fault.New(false)
errs.Fail(errors.New("catastrophe")) errs.Fail(errors.New("catastrophe"))
// Err() returns the primary failure. // Failure() returns the primary failure.
err := errs.Err() err := errs.Failure()
fmt.Println(err) fmt.Println(err)
// if multiple Failures occur, each one after the first gets // if multiple Failures occur, each one after the first gets
// added to the Errs slice. // added to the Recoverable slice as an overflow measure.
errs.Fail(errors.New("another catastrophe")) errs.Fail(errors.New("another catastrophe"))
errSl := errs.Errs() errSl := errs.Recovered()
for _, e := range errSl { for _, e := range errSl {
fmt.Println(e) fmt.Println(e)
} }
// If Err() is nil, then you can assume the operation completed. // If Failure() is nil, then you can assume the operation completed.
// A complete operation is not necessarily an error-free operation. // A complete operation is not necessarily an error-free operation.
// Recoverable errors may still have been added using Add(err). // Recoverable errors may still have been added using AddRecoverable(err).
//
// Even if Err() is nil, Errs() can be non-empty.
// Make sure you check both. // Make sure you check both.
errs = fault.New(true) // If failFast is set to true, then the first recoverable error added gets
// If failFast is set to true, then the first error Add()ed gets
// promoted to the Err() position. // promoted to the Err() position.
errs = fault.New(true)
errs.Add(errors.New("not catastrophic, but still becomes the Err()")) errs.AddRecoverable(errors.New("not catastrophic, but still becomes the Failure()"))
err = errs.Err() err = errs.Failure()
fmt.Println(err) fmt.Println(err)
// Output: catastrophe // Output: catastrophe
// another catastrophe // another catastrophe
// not catastrophic, but still becomes the Err() // not catastrophic, but still becomes the Failure()
} }
// ExampleErrors_Errs describes retrieving individual errors. // ExampleBus_Recovered describes the errors that processing was able to
func ExampleErrors_Errs() { // recover from and continue.
func ExampleBus_Recovered() {
errs := fault.New(false) errs := fault.New(false)
errs.Add(errors.New("not catastrophic")) errs.AddRecoverable(errors.New("not catastrophic"))
errs.Add(errors.New("something unwanted")) errs.AddRecoverable(errors.New("something unwanted"))
// Errs() gets the slice of all recoverable errors Add()ed during // Recovered() gets the slice of all recoverable errors added during
// the run, but which did not force the process to exit. // the run, but which did not cause a failure.
// //
// Errs() only needs to be investigated by the end user at the // Recovered() should never be investigated during lower level processing.
// conclusion of an operation. Checking Errs() within lower- // Implementation only ever needs to check Failure(). If an error didn't
// layer code is a smell. Funcs should return a standard error, // promote to the Failure slot, then it should be ignored.
// or errs.Err(), if they need upstream handlers to handle the errors. //
errSl := errs.Errs() // The end user, at the conclusion of an operation, is the intended recipient
// of the Recovered error slice. After returning to the interface layer
// (the CLI or SDK), it's the job of the end user at that location to
// iterate through those errors and record them as wanted.
errSl := errs.Recovered()
for _, err := range errSl { for _, err := range errSl {
fmt.Println(err) fmt.Println(err)
} }
// One or more errors in errs.Errs() does not necessarily mean the // One or more errors in errs.Recovered() does not necessarily mean the
// process failed. You can have non-zero Errs() but a nil Err(). // process failed. You can have non-zero Recovered() but a nil Failure().
if errs.Err() == nil { if errs.Failure() == nil {
fmt.Println("Err() is nil") fmt.Println("Failure() is nil")
} }
// If Errs() is nil, then you can assume that no recoverable or // Inversely, if Recovered() is nil, then you can assume that no recoverable
// iteration-based errors occurred. But that does not necessarily // or iteration-based errors occurred. But that does not necessarily
// mean the operation was able to complete. // mean the operation was able to complete.
// //
// Even if Errs() contains zero items, Err() can be non-nil. // Even if Recovered() contains zero items, Failure() can be non-nil.
// Make sure you check both. // Make sure you check both.
// Output: not catastrophic // Output: not catastrophic
// something unwanted // something unwanted
// Err() is nil // Failure() is nil
} }
// ExampleErrors_e2e showcases a more complex integration. func ExampleBus_Local() {
func ExampleErrors_e2e() { // It is common for Corso to run operations in parallel,
// and for iterations to be nested within iterations. To
// avoid mistakenly returning an error that was sourced
// from some other async iteration, recoverable errors
// are aggregated into a local bus.
errs := fault.New(false)
el := errs.Local()
err := func() error {
for i := range items {
if el.Failure() != nil {
break
}
if err := getIthItem(i); err != nil {
// instead of calling errs.AddRecoverable(err), we call the
// local bus's Add method. The error will still get
// added to the errs.Recovered() set. But if this err
// causes the run to fail, only this local bus treats
// it as the causal failure.
el.AddRecoverable(err)
}
}
return el.Failure()
}()
if err != nil {
// handle the Failure() that appeared in the local bus.
fmt.Println("failure occurred", errs.Failure())
}
}
// Example_e2e showcases a more complex integration.
func Example_e2e() {
oper := newOperation() oper := newOperation()
// imagine that we're a user, calling into corso SDK. // imagine that we're a user, calling into corso SDK.
// (fake funcs used here to minimize example bloat) // (fake funcs used here to minimize example bloat)
// //
// The operation is our controller, we expect it to // The operation is our controller, we expect it to
// generate a new fault.Errors when constructed, and // generate a new fault.Bus when constructed, and
// to return that struct when we call Run() // to return that struct when we call Run()
errs := oper.Run() errs := oper.Run()
// Let's investigate what went on inside. Since we're at // Let's investigate what went on inside. Since we're at
// the top of our controller, and returning a fault.Errors, // the top of our controller, and returning a fault.Bus,
// all the error handlers set the Fail() case. // all the error handlers set the Fail() case.
/* Run() */ /* Run() */
func() *fault.Errors { func() *fault.Bus {
if err := connectClient(); err != nil { if err := connectClient(); err != nil {
// Fail() here; we're top level in the controller // Fail() here; we're top level in the controller
// and this is a non-recoverable issue // and this is a non-recoverable issue
@ -264,12 +305,13 @@ func ExampleErrors_e2e() {
// What about the lower level handling? storeData didn't // What about the lower level handling? storeData didn't
// return an error, so what's happening there? // return an error, so what's happening there?
/* storeData */ /* storeData */
func(data []any, errs *fault.Errors) { err := func(data []any, errs *fault.Bus) error {
// this is downstream in our code somewhere // this is downstream in our code somewhere
storer := func(a any) error { storer := func(a any) error {
if err := dependencyCall(); err != nil { if err := dependencyCall(); err != nil {
// we're not passing in or calling fault.Errors here, // we're not passing in or calling fault.Bus here,
// because this isn't the iteration handler, it's just // because this isn't the iteration handler, it's just
// a regular error. // a regular error.
return errors.Wrap(err, "dependency") return errors.Wrap(err, "dependency")
@ -278,36 +320,48 @@ func ExampleErrors_e2e() {
return nil return nil
} }
el := errs.Local()
for _, d := range data { for _, d := range data {
if errs.Err() != nil { if el.Failure() != nil {
break break
} }
if err := storer(d); err != nil { if err := storer(d); err != nil {
// Since we're at the top of the iteration, we need // Since we're at the top of the iteration, we need
// to add each error to the fault.Errors struct. // to add each error to the fault.localBus struct.
errs.Add(err) el.AddRecoverable(err)
} }
} }
}(nil, nil)
// then at the end of the oper.Run, we investigate the results. // at the end of the func, we need to return el.Failure()
if errs.Err() != nil { // just in case the local bus promoted an error to the failure
// handle the primary error // position. If we don't return it like normal error handling,
fmt.Println("err occurred", errs.Err()) // then we'll lose scope of that error.
return el.Failure()
}(nil, nil)
if err != nil {
fmt.Println("errored", err)
} }
for _, err := range errs.Errs() { // At the end of the oper.Run, when returning to the interface
// layer, we investigate the results.
if errs.Failure() != nil {
// handle the primary error
fmt.Println("err occurred", errs.Failure())
}
for _, err := range errs.Recovered() {
// handle each recoverable error // handle each recoverable error
fmt.Println("recoverable err occurred", err) fmt.Println("recoverable err occurred", err)
} }
} }
// ExampleErrors_Err_return showcases when to return err or nil vs errs.Err() // ExampleBus_Failure_return showcases when to return an error or
func ExampleErrors_Err_return() { // nil vs errs.Failure() vs *fault.Bus
// The general rule of thumb is to always handle the error directly func ExampleBus_Failure_return() {
// by returning err, or nil, or any variety of extension (wrap, // The general rule of thumb is stick to standard golang error
// stack, clues, etc). // handling whenever possible.
fn := func() error { fn := func() error {
if err := dependency.do(); err != nil { if err := dependency.do(); err != nil {
return errors.Wrap(err, "direct") return errors.Wrap(err, "direct")
@ -319,26 +373,47 @@ func ExampleErrors_Err_return() {
fmt.Println(err) fmt.Println(err)
} }
// The exception is if you're handling recoverable errors. Those // The first exception is if you're handling recoverable errors. Recoverable
// funcs should always return errs.Err(), in case a recoverable // error handling should create a local bus instance, and return localBus.Failure()
// error happened on the last round of iteration. // so that the immediate upstream caller can be made aware of the current failure.
fn2 := func(todo []string, errs *fault.Errors) error { fn2 := func(todo []string, errs *fault.Bus) error {
el := errs.Local()
for range todo { for range todo {
if errs.Err() != nil { if el.Failure() != nil {
return errs.Err() break
} }
if err := dependency.do(); err != nil { if err := dependency.do(); err != nil {
errs.Add(errors.Wrap(err, "recoverable")) el.AddRecoverable(errors.Wrap(err, "recoverable"))
} }
} }
return errs.Err() return el.Failure()
} }
if err := fn2([]string{"a"}, fault.New(true)); err != nil { if err := fn2([]string{"a"}, fault.New(true)); err != nil {
fmt.Println(err) fmt.Println(err)
} }
// The second exception is if you're returning at the interface layer.
// In that case, you're expected to return the fault.Bus itself, so that
// callers can review the fault data.
operationFn := func(errs *fault.Bus) *fault.Bus {
if _, err := getData(); err != nil {
return errs.Fail(err)
}
return errs
}
fbus := operationFn(fault.New(true))
if fbus.Failure() != nil {
fmt.Println("failure", fbus.Failure())
}
for _, err := range fbus.Recovered() {
fmt.Println("recovered", err)
}
// Output: direct: caught one // Output: direct: caught one
// recoverable: caught one // recoverable: caught one
} }

View File

@ -6,21 +6,20 @@ import (
"golang.org/x/exp/slices" "golang.org/x/exp/slices"
) )
type Errors struct { type Bus struct {
mu *sync.Mutex mu *sync.Mutex
// err identifies non-recoverable errors. This includes // failure identifies non-recoverable errors. This includes
// non-start cases (ex: cannot connect to client), hard- // non-start cases (ex: cannot connect to client), hard-
// stop issues (ex: credentials expired) or conscious exit // stop issues (ex: credentials expired) or conscious exit
// cases (ex: iteration error + failFast config). // cases (ex: iteration error + failFast config).
err error failure error
// errs is the accumulation of recoverable or iterated // recoverable is the accumulation of recoverable errors
// errors. Eg: if a process is retrieving N items, and // Eg: if a process is retrieving N items, and 1 of the
// 1 of the items fails to be retrieved, but the rest of // items fails to be retrieved, but the rest of them succeed,
// them succeed, we'd expect to see 1 error added to this // we'd expect to see 1 error added to this slice.
// slice. recoverable []error
errs []error
// if failFast is true, the first errs addition will // if failFast is true, the first errs addition will
// get promoted to the err value. This signifies a // get promoted to the err value. This signifies a
@ -29,51 +28,56 @@ type Errors struct {
failFast bool failFast bool
} }
// ErrorsData provides the errors data alone, without sync // Errors provides the errors data alone, without sync
// controls, allowing the data to be persisted. // controls, allowing the data to be persisted.
type ErrorsData struct { type Errors struct {
Err error `json:"-"` Failure error `json:"failure"`
Errs []error `json:"-"` Recovered []error `json:"-"`
FailFast bool `json:"failFast"` FailFast bool `json:"failFast"`
} }
// New constructs a new error with default values in place. // New constructs a new error with default values in place.
func New(failFast bool) *Errors { func New(failFast bool) *Bus {
return &Errors{ return &Bus{
mu: &sync.Mutex{}, mu: &sync.Mutex{},
errs: []error{}, recoverable: []error{},
failFast: failFast, failFast: failFast,
} }
} }
// Err returns the primary error. If not nil, this // Failure returns the primary error. If not nil, this
// indicates the operation exited prior to completion. // indicates the operation exited prior to completion.
func (e *Errors) Err() error { func (e *Bus) Failure() error {
return e.err return e.failure
} }
// Errs returns the slice of recoverable and // Recovered returns the slice of errors that occurred in
// iterated errors. // recoverable points of processing. This is often during
func (e *Errors) Errs() []error { // iteration where a single failure (ex: retrieving an item),
return e.errs // doesn't require the entire process to end.
func (e *Bus) Recovered() []error {
return e.recoverable
} }
// Data returns the plain set of error data // Errors returns the plain record of errors that were aggregated
// without any sync properties. // within a fault Bus.
func (e *Errors) Data() ErrorsData { func (e *Bus) Errors() Errors {
return ErrorsData{ return Errors{
Err: e.err, Failure: e.failure,
Errs: slices.Clone(e.errs), Recovered: slices.Clone(e.recoverable),
FailFast: e.failFast, FailFast: e.failFast,
} }
} }
// TODO: introduce Failer interface // Fail sets the non-recoverable error (ie: bus.failure)
// in the bus. If a failure error is already present,
// Fail sets the non-recoverable error (ie: errors.err) // the error gets added to the recoverable slice for
// in the errors struct. If a non-recoverable error is // purposes of tracking.
// already present, the error gets added to the errs slice. //
func (e *Errors) Fail(err error) *Errors { // TODO: Return Data, not Bus. The consumers of a failure
// should care about the state of data, not the communication
// pattern.
func (e *Bus) Fail(err error) *Bus {
if err == nil { if err == nil {
return e return e
} }
@ -81,28 +85,33 @@ func (e *Errors) Fail(err error) *Errors {
e.mu.Lock() e.mu.Lock()
defer e.mu.Unlock() defer e.mu.Unlock()
return e.setErr(err) return e.setFailure(err)
} }
// setErr handles setting errors.err. Sync locking gets // setFailure handles setting bus.failure. Sync locking gets
// handled upstream of this call. // handled upstream of this call.
func (e *Errors) setErr(err error) *Errors { func (e *Bus) setFailure(err error) *Bus {
if e.err == nil { if e.failure == nil {
e.err = err e.failure = err
return e return e
} }
e.errs = append(e.errs, err) // technically not a recoverable error: we're using the
// recoverable slice as an overflow container here to
// ensure everything is tracked.
e.recoverable = append(e.recoverable, err)
return e return e
} }
// Add appends the error to the slice of recoverable and // AddRecoverable appends the error to the slice of recoverable
// iterated errors (ie: errors.errs). If failFast is true, // errors (ie: bus.recoverable). If failFast is true, the first
// the first Added error will get copied to errors.err, // added error will get copied to bus.failure, causing the bus
// causing the errors struct to identify as non-recoverably // to identify as non-recoverably failed.
// failed. //
func (e *Errors) Add(err error) *Errors { // TODO: nil return, not Bus, since we don't want people to return
// from errors.AddRecoverable().
func (e *Bus) AddRecoverable(err error) *Bus {
if err == nil { if err == nil {
return e return e
} }
@ -110,17 +119,62 @@ func (e *Errors) Add(err error) *Errors {
e.mu.Lock() e.mu.Lock()
defer e.mu.Unlock() defer e.mu.Unlock()
return e.addErr(err) return e.addRecoverableErr(err)
} }
// addErr handles adding errors to errors.errs. Sync locking // addErr handles adding errors to errors.errs. Sync locking
// gets handled upstream of this call. // gets handled upstream of this call.
func (e *Errors) addErr(err error) *Errors { func (e *Bus) addRecoverableErr(err error) *Bus {
if e.err == nil && e.failFast { if e.failure == nil && e.failFast {
e.setErr(err) e.setFailure(err)
} }
e.errs = append(e.errs, err) e.recoverable = append(e.recoverable, err)
return e return e
} }
// ---------------------------------------------------------------------------
// Local aggregator
// ---------------------------------------------------------------------------
// Local constructs a new local bus to handle error aggregation in a
// constrained scope. Local busses shouldn't be passed down to other
// funcs, and the function that spawned the local bus should always
// return `local.Failure()` to ensure that hard failures are propagated
// back upstream.
func (e *Bus) Local() *localBus {
return &localBus{
mu: &sync.Mutex{},
bus: e,
}
}
type localBus struct {
mu *sync.Mutex
bus *Bus
current error
}
func (e *localBus) AddRecoverable(err error) {
if err == nil {
return
}
e.mu.Lock()
defer e.mu.Unlock()
if e.bus.Failure() == nil && e.bus.failFast {
e.current = err
}
e.bus.AddRecoverable(err)
}
// Failure returns the failure that happened within the local bus.
// It does not return the underlying bus.Failure(), only the failure
// that was recorded within the local bus instance. This error should
// get returned by any func which created a local bus.
func (e *localBus) Failure() error {
return e.current
}
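
Taken together, the file now models a single lifecycle: a Bus is constructed once per operation, Fail() records the hard stop, AddRecoverable() (directly or through a localBus) accumulates the rest, and Errors() snapshots the plain data for persistence. A compact sketch of that lifecycle (the wrapping func and error strings are illustrative):

func runOperation() fault.Errors {
	errs := fault.New(false) // failFast disabled

	// a recoverable problem; with failFast == false it stays
	// out of the failure slot.
	errs.AddRecoverable(errors.New("skipped one item"))

	// a hard stop; this lands in the failure slot.
	errs.Fail(errors.New("credentials expired"))

	// snapshot the plain data for storage, eg on a backup model.
	return errs.Errors()
}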

View File

@ -75,16 +75,16 @@ func (suite *FaultErrorsUnitSuite) TestErr() {
suite.T().Run(test.name, func(t *testing.T) { suite.T().Run(test.name, func(t *testing.T) {
n := fault.New(test.failFast) n := fault.New(test.failFast)
require.NotNil(t, n) require.NotNil(t, n)
require.NoError(t, n.Err()) require.NoError(t, n.Failure())
require.Empty(t, n.Errs()) require.Empty(t, n.Recovered())
e := n.Fail(test.fail) e := n.Fail(test.fail)
require.NotNil(t, e) require.NotNil(t, e)
e = n.Add(test.add) e = n.AddRecoverable(test.add)
require.NotNil(t, e) require.NotNil(t, e)
test.expect(t, n.Err()) test.expect(t, n.Failure())
}) })
} }
} }
@ -94,16 +94,16 @@ func (suite *FaultErrorsUnitSuite) TestFail() {
n := fault.New(false) n := fault.New(false)
require.NotNil(t, n) require.NotNil(t, n)
require.NoError(t, n.Err()) require.NoError(t, n.Failure())
require.Empty(t, n.Errs()) require.Empty(t, n.Recovered())
n.Fail(assert.AnError) n.Fail(assert.AnError)
assert.Error(t, n.Err()) assert.Error(t, n.Failure())
assert.Empty(t, n.Errs()) assert.Empty(t, n.Recovered())
n.Fail(assert.AnError) n.Fail(assert.AnError)
assert.Error(t, n.Err()) assert.Error(t, n.Failure())
assert.NotEmpty(t, n.Errs()) assert.NotEmpty(t, n.Recovered())
} }
func (suite *FaultErrorsUnitSuite) TestErrs() { func (suite *FaultErrorsUnitSuite) TestErrs() {
@ -154,10 +154,10 @@ func (suite *FaultErrorsUnitSuite) TestErrs() {
e := n.Fail(test.fail) e := n.Fail(test.fail)
require.NotNil(t, e) require.NotNil(t, e)
e = n.Add(test.add) e = n.AddRecoverable(test.add)
require.NotNil(t, e) require.NotNil(t, e)
test.expect(t, n.Errs()) test.expect(t, n.Recovered())
}) })
} }
} }
@ -168,16 +168,16 @@ func (suite *FaultErrorsUnitSuite) TestAdd() {
n := fault.New(true) n := fault.New(true)
require.NotNil(t, n) require.NotNil(t, n)
n.Add(assert.AnError) n.AddRecoverable(assert.AnError)
assert.Error(t, n.Err()) assert.Error(t, n.Failure())
assert.Len(t, n.Errs(), 1) assert.Len(t, n.Recovered(), 1)
n.Add(assert.AnError) n.AddRecoverable(assert.AnError)
assert.Error(t, n.Err()) assert.Error(t, n.Failure())
assert.Len(t, n.Errs(), 2) assert.Len(t, n.Recovered(), 2)
} }
func (suite *FaultErrorsUnitSuite) TestData() { func (suite *FaultErrorsUnitSuite) TestErrors() {
t := suite.T() t := suite.T()
// not fail-fast // not fail-fast
@ -185,12 +185,12 @@ func (suite *FaultErrorsUnitSuite) TestData() {
require.NotNil(t, n) require.NotNil(t, n)
n.Fail(errors.New("fail")) n.Fail(errors.New("fail"))
n.Add(errors.New("1")) n.AddRecoverable(errors.New("1"))
n.Add(errors.New("2")) n.AddRecoverable(errors.New("2"))
d := n.Data() d := n.Errors()
assert.Equal(t, n.Err(), d.Err) assert.Equal(t, n.Failure(), d.Failure)
assert.ElementsMatch(t, n.Errs(), d.Errs) assert.ElementsMatch(t, n.Recovered(), d.Recovered)
assert.False(t, d.FailFast) assert.False(t, d.FailFast)
// fail-fast // fail-fast
@ -198,12 +198,12 @@ func (suite *FaultErrorsUnitSuite) TestData() {
require.NotNil(t, n) require.NotNil(t, n)
n.Fail(errors.New("fail")) n.Fail(errors.New("fail"))
n.Add(errors.New("1")) n.AddRecoverable(errors.New("1"))
n.Add(errors.New("2")) n.AddRecoverable(errors.New("2"))
d = n.Data() d = n.Errors()
assert.Equal(t, n.Err(), d.Err) assert.Equal(t, n.Failure(), d.Failure)
assert.ElementsMatch(t, n.Errs(), d.Errs) assert.ElementsMatch(t, n.Recovered(), d.Recovered)
assert.True(t, d.FailFast) assert.True(t, d.FailFast)
} }
@ -214,17 +214,13 @@ func (suite *FaultErrorsUnitSuite) TestMarshalUnmarshal() {
n := fault.New(false) n := fault.New(false)
require.NotNil(t, n) require.NotNil(t, n)
n.Add(errors.New("1")) n.AddRecoverable(errors.New("1"))
n.Add(errors.New("2")) n.AddRecoverable(errors.New("2"))
data := n.Data() bs, err := json.Marshal(n.Errors())
jsonStr, err := json.Marshal(data)
require.NoError(t, err) require.NoError(t, err)
um := fault.ErrorsData{} err = json.Unmarshal(bs, &fault.Errors{})
err = json.Unmarshal(jsonStr, &um)
require.NoError(t, err) require.NoError(t, err)
} }
@ -246,8 +242,34 @@ func (suite *FaultErrorsUnitSuite) TestUnmarshalLegacy() {
t.Logf("jsonStr is %s\n", jsonStr) t.Logf("jsonStr is %s\n", jsonStr)
um := fault.ErrorsData{} um := fault.Errors{}
err = json.Unmarshal(jsonStr, &um) err = json.Unmarshal(jsonStr, &um)
require.NoError(t, err) require.NoError(t, err)
} }
func (suite *FaultErrorsUnitSuite) TestTracker() {
t := suite.T()
eb := fault.New(false)
lb := eb.Local()
assert.NoError(t, lb.Failure())
assert.Empty(t, eb.Recovered())
lb.AddRecoverable(assert.AnError)
assert.NoError(t, lb.Failure())
assert.NoError(t, eb.Failure())
assert.NotEmpty(t, eb.Recovered())
ebt := fault.New(true)
lbt := ebt.Local()
assert.NoError(t, lbt.Failure())
assert.Empty(t, ebt.Recovered())
lbt.AddRecoverable(assert.AnError)
assert.Error(t, lbt.Failure())
assert.Error(t, ebt.Failure())
assert.NotEmpty(t, ebt.Recovered())
}

View File

@ -185,8 +185,8 @@ func runBackupLoadTest(
assert.Less(t, 0, b.Results.ItemsWritten, "items written") assert.Less(t, 0, b.Results.ItemsWritten, "items written")
assert.Less(t, int64(0), b.Results.BytesUploaded, "bytes uploaded") assert.Less(t, int64(0), b.Results.BytesUploaded, "bytes uploaded")
assert.Equal(t, len(users), b.Results.ResourceOwners, "resource owners") assert.Equal(t, len(users), b.Results.ResourceOwners, "resource owners")
assert.NoError(t, b.Errors.Err(), "non-recoverable error") assert.NoError(t, b.Errors.Failure(), "non-recoverable error")
assert.Empty(t, b.Errors.Errs(), "recoverable errors") assert.Empty(t, b.Errors.Recovered(), "recoverable errors")
assert.NoError(t, b.Results.ReadErrors, "read errors") assert.NoError(t, b.Results.ReadErrors, "read errors")
assert.NoError(t, b.Results.WriteErrors, "write errors") assert.NoError(t, b.Results.WriteErrors, "write errors")
}) })
@ -242,7 +242,7 @@ func runBackupDetailsLoadTest(
t.Run("backup_details_"+name, func(t *testing.T) { t.Run("backup_details_"+name, func(t *testing.T) {
var ( var (
errs *fault.Errors errs *fault.Bus
b *backup.Backup b *backup.Backup
ds *details.Details ds *details.Details
labels = pprof.Labels("details_load_test", name) labels = pprof.Labels("details_load_test", name)
@ -252,8 +252,8 @@ func runBackupDetailsLoadTest(
ds, b, errs = r.BackupDetails(ctx, backupID) ds, b, errs = r.BackupDetails(ctx, backupID)
}) })
require.NoError(t, errs.Err(), "retrieving details in backup "+backupID) require.NoError(t, errs.Failure(), "retrieving details in backup "+backupID)
require.Empty(t, errs.Errs(), "retrieving details in backup "+backupID) require.Empty(t, errs.Recovered(), "retrieving details in backup "+backupID)
require.NotNil(t, ds, "backup details must exist") require.NotNil(t, ds, "backup details must exist")
require.NotNil(t, b, "backup must exist") require.NotNil(t, b, "backup must exist")
@ -294,8 +294,8 @@ func doRestoreLoadTest(
assert.Less(t, 0, r.Results.ItemsRead, "items read") assert.Less(t, 0, r.Results.ItemsRead, "items read")
assert.Less(t, 0, r.Results.ItemsWritten, "items written") assert.Less(t, 0, r.Results.ItemsWritten, "items written")
assert.Equal(t, len(users), r.Results.ResourceOwners, "resource owners") assert.Equal(t, len(users), r.Results.ResourceOwners, "resource owners")
assert.NoError(t, r.Errors.Err(), "non-recoverable error") assert.NoError(t, r.Errors.Failure(), "non-recoverable error")
assert.Empty(t, r.Errors.Errs(), "recoverable errors") assert.Empty(t, r.Errors.Recovered(), "recoverable errors")
assert.NoError(t, r.Results.ReadErrors, "read errors") assert.NoError(t, r.Results.ReadErrors, "read errors")
assert.NoError(t, r.Results.WriteErrors, "write errors") assert.NoError(t, r.Results.WriteErrors, "write errors")
assert.Equal(t, expectItemCount, r.Results.ItemsWritten, "backup and restore wrote the same count of items") assert.Equal(t, expectItemCount, r.Results.ItemsWritten, "backup and restore wrote the same count of items")

View File

@ -31,12 +31,12 @@ var ErrorRepoAlreadyExists = errors.New("a repository was already initialized wi
// repository. // repository.
type BackupGetter interface { type BackupGetter interface {
Backup(ctx context.Context, id model.StableID) (*backup.Backup, error) Backup(ctx context.Context, id model.StableID) (*backup.Backup, error)
Backups(ctx context.Context, ids []model.StableID) ([]*backup.Backup, *fault.Errors) Backups(ctx context.Context, ids []model.StableID) ([]*backup.Backup, *fault.Bus)
BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error) BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error)
BackupDetails( BackupDetails(
ctx context.Context, ctx context.Context,
backupID string, backupID string,
) (*details.Details, *backup.Backup, *fault.Errors) ) (*details.Details, *backup.Backup, *fault.Bus)
} }
type Repository interface { type Repository interface {
@ -298,7 +298,7 @@ func (r repository) Backup(ctx context.Context, id model.StableID) (*backup.Back
// BackupsByID lists backups by ID. Returns as many backups as possible with // BackupsByID lists backups by ID. Returns as many backups as possible with
// errors for the backups it was unable to retrieve. // errors for the backups it was unable to retrieve.
func (r repository) Backups(ctx context.Context, ids []model.StableID) ([]*backup.Backup, *fault.Errors) { func (r repository) Backups(ctx context.Context, ids []model.StableID) ([]*backup.Backup, *fault.Bus) {
var ( var (
bups []*backup.Backup bups []*backup.Backup
errs = fault.New(false) errs = fault.New(false)
@ -308,7 +308,7 @@ func (r repository) Backups(ctx context.Context, ids []model.StableID) ([]*backu
for _, id := range ids { for _, id := range ids {
b, err := sw.GetBackup(ctx, id) b, err := sw.GetBackup(ctx, id)
if err != nil { if err != nil {
errs.Add(clues.Stack(err).With("backup_id", id)) errs.AddRecoverable(clues.Stack(err).With("backup_id", id))
} }
bups = append(bups, b) bups = append(bups, b)
@ -327,7 +327,7 @@ func (r repository) BackupsByTag(ctx context.Context, fs ...store.FilterOption)
func (r repository) BackupDetails( func (r repository) BackupDetails(
ctx context.Context, ctx context.Context,
backupID string, backupID string,
) (*details.Details, *backup.Backup, *fault.Errors) { ) (*details.Details, *backup.Backup, *fault.Bus) {
sw := store.NewKopiaStore(r.modelStore) sw := store.NewKopiaStore(r.modelStore)
errs := fault.New(false) errs := fault.New(false)

View File

@ -719,7 +719,7 @@ func (s ExchangeScope) setDefaults() {
func (s exchange) Reduce( func (s exchange) Reduce(
ctx context.Context, ctx context.Context,
deets *details.Details, deets *details.Details,
errs *fault.Errors, errs *fault.Bus,
) *details.Details { ) *details.Details {
return reduce[ExchangeScope]( return reduce[ExchangeScope](
ctx, ctx,

View File

@ -498,7 +498,7 @@ func (s OneDriveScope) DiscreteCopy(user string) OneDriveScope {
func (s oneDrive) Reduce( func (s oneDrive) Reduce(
ctx context.Context, ctx context.Context,
deets *details.Details, deets *details.Details,
errs *fault.Errors, errs *fault.Bus,
) *details.Details { ) *details.Details {
return reduce[OneDriveScope]( return reduce[OneDriveScope](
ctx, ctx,

View File

@ -288,7 +288,7 @@ func reduce[T scopeT, C categoryT](
deets *details.Details, deets *details.Details,
s Selector, s Selector,
dataCategories map[path.CategoryType]C, dataCategories map[path.CategoryType]C,
errs *fault.Errors, errs *fault.Bus,
) *details.Details { ) *details.Details {
ctx, end := D.Span(ctx, "selectors:reduce") ctx, end := D.Span(ctx, "selectors:reduce")
defer end() defer end()
@ -314,7 +314,7 @@ func reduce[T scopeT, C categoryT](
for _, ent := range deets.Items() { for _, ent := range deets.Items() {
repoPath, err := path.FromDataLayerPath(ent.RepoRef, true) repoPath, err := path.FromDataLayerPath(ent.RepoRef, true)
if err != nil { if err != nil {
errs.Add(clues.Wrap(err, "transforming repoRef to path").WithClues(ctx)) errs.AddRecoverable(clues.Wrap(err, "transforming repoRef to path").WithClues(ctx))
continue continue
} }
@ -326,7 +326,7 @@ func reduce[T scopeT, C categoryT](
if len(ent.LocationRef) > 0 { if len(ent.LocationRef) > 0 {
pb, err := path.Builder{}.SplitUnescapeAppend(ent.LocationRef) pb, err := path.Builder{}.SplitUnescapeAppend(ent.LocationRef)
if err != nil { if err != nil {
errs.Add(clues.Wrap(err, "transforming locationRef to path").WithClues(ctx)) errs.AddRecoverable(clues.Wrap(err, "transforming locationRef to path").WithClues(ctx))
continue continue
} }
@ -338,7 +338,7 @@ func reduce[T scopeT, C categoryT](
repoPath.Category(), repoPath.Category(),
true) true)
if err != nil { if err != nil {
errs.Add(clues.Wrap(err, "transforming locationRef to path").WithClues(ctx)) errs.AddRecoverable(clues.Wrap(err, "transforming locationRef to path").WithClues(ctx))
continue continue
} }
} }

View File

@ -284,7 +284,7 @@ func (suite *SelectorScopesSuite) TestReduce() {
dataCats, dataCats,
errs) errs)
require.NotNil(t, result) require.NotNil(t, result)
require.NoError(t, errs.Err(), "no recoverable errors") require.NoError(t, errs.Failure(), "no non-recoverable errors")
assert.Len(t, result.Entries, test.expectLen) assert.Len(t, result.Entries, test.expectLen)
}) })
} }

View File

@ -70,7 +70,7 @@ var (
const All = "All" const All = "All"
type Reducer interface { type Reducer interface {
Reduce(context.Context, *details.Details, *fault.Errors) *details.Details Reduce(context.Context, *details.Details, *fault.Bus) *details.Details
} }
// selectorResourceOwners aggregates all discrete path category types described // selectorResourceOwners aggregates all discrete path category types described
@ -240,7 +240,7 @@ func (s Selector) PathService() path.ServiceType {
func (s Selector) Reduce( func (s Selector) Reduce(
ctx context.Context, ctx context.Context,
deets *details.Details, deets *details.Details,
errs *fault.Errors, errs *fault.Bus,
) (*details.Details, error) { ) (*details.Details, error) {
r, err := selectorAsIface[Reducer](s) r, err := selectorAsIface[Reducer](s)
if err != nil { if err != nil {

View File

@ -570,7 +570,7 @@ func (s SharePointScope) DiscreteCopy(site string) SharePointScope {
func (s sharePoint) Reduce( func (s sharePoint) Reduce(
ctx context.Context, ctx context.Context,
deets *details.Details, deets *details.Details,
errs *fault.Errors, errs *fault.Bus,
) *details.Details { ) *details.Details {
return reduce[SharePointScope]( return reduce[SharePointScope](
ctx, ctx,

View File

@ -31,12 +31,12 @@ func UsersCompat(ctx context.Context, acct account.Account) ([]*User, error) {
return nil, err return nil, err
} }
return users, errs.Err() return users, errs.Failure()
} }
// Users returns a list of users in the specified M365 tenant // Users returns a list of users in the specified M365 tenant
// TODO: Implement paging support // TODO: Implement paging support
func Users(ctx context.Context, acct account.Account, errs *fault.Errors) ([]*User, error) { func Users(ctx context.Context, acct account.Account, errs *fault.Bus) ([]*User, error) {
gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users, errs) gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users, errs)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "initializing M365 graph connection") return nil, errors.Wrap(err, "initializing M365 graph connection")
@ -61,7 +61,7 @@ func Users(ctx context.Context, acct account.Account, errs *fault.Errors) ([]*Us
return ret, nil return ret, nil
} }
func UserIDs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]string, error) { func UserIDs(ctx context.Context, acct account.Account, errs *fault.Bus) ([]string, error) {
users, err := Users(ctx, acct, errs) users, err := Users(ctx, acct, errs)
if err != nil { if err != nil {
return nil, err return nil, err
@ -77,7 +77,7 @@ func UserIDs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]s
// UserPNs retrieves all user principalNames in the tenant. Principal Names // UserPNs retrieves all user principalNames in the tenant. Principal Names
// can be used analogously to userIDs in graph API queries. // can be used analogously to userIDs in graph API queries.
func UserPNs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]string, error) { func UserPNs(ctx context.Context, acct account.Account, errs *fault.Bus) ([]string, error) {
users, err := Users(ctx, acct, errs) users, err := Users(ctx, acct, errs)
if err != nil { if err != nil {
return nil, err return nil, err
@ -92,7 +92,7 @@ func UserPNs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]s
} }
// SiteURLs returns a list of SharePoint site WebURLs in the specified M365 tenant // SiteURLs returns a list of SharePoint site WebURLs in the specified M365 tenant
func SiteURLs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]string, error) { func SiteURLs(ctx context.Context, acct account.Account, errs *fault.Bus) ([]string, error) {
gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites, errs) gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites, errs)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "initializing M365 graph connection") return nil, errors.Wrap(err, "initializing M365 graph connection")
@ -102,7 +102,7 @@ func SiteURLs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]
} }
// SiteIDs returns a list of SharePoint site IDs in the specified M365 tenant // SiteIDs returns a list of SharePoint site IDs in the specified M365 tenant
func SiteIDs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]string, error) { func SiteIDs(ctx context.Context, acct account.Account, errs *fault.Bus) ([]string, error) {
gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites, errs) gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites, errs)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "initializing graph connection") return nil, errors.Wrap(err, "initializing graph connection")
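
A sketch of how a caller threads a bus through these helpers; the account setup is omitted and the printouts are illustrative:

errs := fault.New(false)

ids, err := UserIDs(ctx, acct, errs)
if err != nil {
	return err // hard failure: the connection or query failed outright
}

// any recoverable issues recorded during the listing.
for _, e := range errs.Recovered() {
	fmt.Println("recovered:", e)
}

fmt.Println("found", len(ids), "users")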