diff --git a/src/internal/kopia/upload.go b/src/internal/kopia/upload.go
index 66898108b..85b95bae2 100644
--- a/src/internal/kopia/upload.go
+++ b/src/internal/kopia/upload.go
@@ -132,7 +132,7 @@ func (rw *restoreStreamReader) Read(p []byte) (n int, err error) {
 }
 
 type itemDetails struct {
-	info         *details.ItemInfo
+	infoFunc     func() (details.ItemInfo, error)
 	repoPath     path.Path
 	prevPath     path.Path
 	locationPath *path.Builder
@@ -198,13 +198,16 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
 	ctx := clues.Add(
 		cp.ctx,
 		"service", d.repoPath.Service().String(),
-		"category", d.repoPath.Category().String())
+		"category", d.repoPath.Category().String(),
+		"item_path", d.repoPath,
+		"item_loc", d.locationPath)
 
 	// These items were sourced from a base snapshot or were cached in kopia so we
 	// never had to materialize their details in-memory.
-	if d.info == nil || d.cached {
+	if d.infoFunc == nil || d.cached {
 		if d.prevPath == nil {
 			cp.errs.AddRecoverable(ctx, clues.New("finished file sourced from previous backup with no previous path").
+				WithClues(ctx).
 				Label(fault.LabelForceNoBackupCreation))
 
 			return
@@ -220,18 +223,32 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
 			d.locationPath)
 		if err != nil {
 			cp.errs.AddRecoverable(ctx, clues.Wrap(err, "adding finished file to merge list").
+				WithClues(ctx).
 				Label(fault.LabelForceNoBackupCreation))
 		}
 
 		return
 	}
 
-	err = cp.deets.Add(
-		d.repoPath,
-		d.locationPath,
-		*d.info)
+	info, err := d.infoFunc()
+	if err != nil {
+		cp.errs.AddRecoverable(ctx, clues.Wrap(err, "getting ItemInfo").
+			WithClues(ctx).
+			Label(fault.LabelForceNoBackupCreation))
+
+		return
+	} else if !ptr.Val(d.modTime).Equal(info.Modified()) {
+		cp.errs.AddRecoverable(ctx, clues.New("item modTime mismatch").
+			WithClues(ctx).
+			Label(fault.LabelForceNoBackupCreation))
+
+		return
+	}
+
+	err = cp.deets.Add(d.repoPath, d.locationPath, info)
 	if err != nil {
 		cp.errs.AddRecoverable(ctx, clues.Wrap(err, "adding finished file to details").
+			WithClues(ctx).
 			Label(fault.LabelForceNoBackupCreation))
 
 		return
@@ -394,13 +411,12 @@ func collectionEntries(
 			// Relative path given to us in the callback is missing the root
 			// element. Add to pending set before calling the callback to avoid race
 			// conditions when the item is completed.
-			//
-			// TODO(ashmrtn): If we want to pull item info for cached item from a
-			// previous snapshot then we should populate prevPath here and leave
-			// info nil.
-			itemInfo := ei.Info()
 			d := &itemDetails{
-				info:     &itemInfo,
+				// TODO(ashmrtn): Update API in data package to return an error and
+				// then remove this wrapper.
+				infoFunc: func() (details.ItemInfo, error) {
+					return ei.Info(), nil
+				},
 				repoPath: itemPath,
 				// Also use the current path as the previous path for this item. This
 				// is so that if the item is marked as cached and we need to merge
@@ -517,7 +533,6 @@ func streamBaseEntries(
 			// the item to progress and having progress aggregate everything for
 			// later.
 			d := &itemDetails{
-				info:         nil,
 				repoPath:     itemPath,
 				prevPath:     prevItemPath,
 				locationPath: locationPath,
diff --git a/src/internal/kopia/upload_test.go b/src/internal/kopia/upload_test.go
index 2518f708f..61fcc2de7 100644
--- a/src/internal/kopia/upload_test.go
+++ b/src/internal/kopia/upload_test.go
@@ -394,10 +394,12 @@ var finishedFileTable = []struct {
 			return map[string]testInfo{
 				fname: {
 					info: &itemDetails{
-						info: &details.ItemInfo{
-							Exchange: &details.ExchangeInfo{
-								ItemType: details.ExchangeMail,
-							},
+						infoFunc: func() (details.ItemInfo, error) {
+							return details.ItemInfo{
+								Exchange: &details.ExchangeInfo{
+									ItemType: details.ExchangeMail,
+								},
+							}, nil
 						},
 						repoPath:     fpath,
 						locationPath: path.Builder{}.Append(fpath.Folders()...),
@@ -430,10 +432,12 @@ var finishedFileTable = []struct {
 			return map[string]testInfo{
 				fname: {
 					info: &itemDetails{
-						info: &details.ItemInfo{
-							Exchange: &details.ExchangeInfo{
-								ItemType: details.ExchangeMail,
-							},
+						infoFunc: func() (details.ItemInfo, error) {
+							return details.ItemInfo{
+								Exchange: &details.ExchangeInfo{
+									ItemType: details.ExchangeMail,
+								},
+							}, nil
 						},
 						repoPath: fpath,
 					},
@@ -526,7 +530,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFile() {
 				}
 
 				if cachedTest.dropInfo {
-					v.info.info = nil
+					v.info.infoFunc = nil
 				}
 			}
 
@@ -589,7 +593,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() {
 	bd := &details.Builder{}
 	cachedItems := map[string]testInfo{
 		suite.targetFileName: {
-			info:       &itemDetails{info: nil, repoPath: suite.targetFilePath},
+			info:       &itemDetails{repoPath: suite.targetFilePath},
 			err:        nil,
 			totalBytes: 100,
 		},
@@ -654,7 +658,6 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarch
 	}
 
 	deets := &itemDetails{
-		info:         nil,
 		repoPath:     suite.targetFilePath,
 		prevPath:     prevPath,
 		locationPath: suite.targetFileLoc,
diff --git a/src/internal/m365/service/exchange/mock/collections.go b/src/internal/m365/service/exchange/mock/collections.go
index c04e8e8a5..1787352ac 100644
--- a/src/internal/m365/service/exchange/mock/collections.go
+++ b/src/internal/m365/service/exchange/mock/collections.go
@@ -121,13 +121,16 @@ func (medc *DataCollection) Items(
 		defer close(res)
 
 		for i := 0; i < medc.messageCount; i++ {
+			info := StubMailInfo()
+			info.Exchange.Modified = medc.ModTimes[i]
+
 			res <- &dataMock.Item{
 				ItemID:       medc.Names[i],
 				Reader:       io.NopCloser(bytes.NewReader(medc.Data[i])),
 				ItemSize:     int64(len(medc.Data[i])),
 				ModifiedTime: medc.ModTimes[i],
 				DeletedFlag:  medc.DeletedItems[i],
-				ItemInfo:     StubMailInfo(),
+				ItemInfo:     info,
 			}
 		}
 	}()
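
Review note: the standalone sketch below is not Corso code. The names lazyItem, itemInfo, and finish are hypothetical stand-ins used only to illustrate the pattern this diff introduces in corsoProgress.FinishedFile: item details are materialized lazily through an infoFunc closure, and the recorded modTime is compared against the materialized info's Modified() time before the entry is accepted.

package main

import (
	"errors"
	"fmt"
	"time"
)

// itemInfo stands in for details.ItemInfo; only the modified time matters here.
type itemInfo struct {
	modified time.Time
}

func (i itemInfo) Modified() time.Time { return i.modified }

// lazyItem mirrors the shape of the patched itemDetails: info is produced on
// demand by a closure instead of being materialized up front.
type lazyItem struct {
	infoFunc func() (itemInfo, error)
	modTime  *time.Time
}

// finish sketches the new flow: skip items with no infoFunc, surface closure
// errors, and reject items whose modTime drifted between enumeration and upload.
func finish(d lazyItem) error {
	if d.infoFunc == nil {
		// Sourced from a base snapshot or cached; nothing to materialize.
		return nil
	}

	info, err := d.infoFunc()
	if err != nil {
		return fmt.Errorf("getting item info: %w", err)
	}

	if d.modTime != nil && !d.modTime.Equal(info.Modified()) {
		return errors.New("item modTime mismatch")
	}

	// At this point the real code adds (repoPath, locationPath, info) to the
	// backup details builder.
	return nil
}

func main() {
	now := time.Now()
	item := lazyItem{
		infoFunc: func() (itemInfo, error) { return itemInfo{modified: now}, nil },
		modTime:  &now,
	}

	fmt.Println(finish(item)) // <nil>
}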