Redo folder generation in backup details (#3140)
Augment the folder backup details entries with LocationRef and some information about the data type that generated the entry. Also add top-level container information, like drive name/ID, where applicable.

Refactor the folder-generation code so it is more self-contained and requires fewer parameters to add entries to backup details.

The important changes are in details.go. All other changes just keep up with the slightly modified function API or update tests.

---

#### Does this PR need a docs update or release note?

- [ ] ✅ Yes, it's included
- [ ] 🕐 Yes, but in a later PR
- [x] ⛔ No

#### Type of change

- [x] 🌻 Feature
- [ ] 🐛 Bugfix
- [ ] 🗺️ Documentation
- [ ] 🤖 Supportability/Tests
- [ ] 💻 CI/Deployment
- [ ] 🧹 Tech Debt/Cleanup

#### Issue(s)

* closes #3120
* closes #2138

#### Test Plan

- [x] 💪 Manual
- [x] ⚡ Unit test
- [ ] 💚 E2E
Parent: 372f1d5218
Commit: f01e25ad83
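The heart of the change is the `details.Builder.Add` signature: callers no longer precompute ref strings, and folder entries are generated inside the builder. For orientation, the before/after signatures as they appear in the hunks below:

```go
// Old: every ref is a caller-supplied string, and callers must also
// invoke FolderEntriesForPath/AddFoldersForItem themselves.
func (b *Builder) Add(
	repoRef, shortRef, parentRef, locationRef string,
	updated bool,
	info ItemInfo,
) error

// New: ShortRef, ParentRef, and LocationRef are derived from the
// structured path types, and folder entries are added as a side effect.
func (b *Builder) Add(
	repoRef path.Path,
	locationRef *path.Builder,
	updated bool,
	info ItemInfo,
) error
```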
@@ -471,8 +471,9 @@ func runExchangeDetailsCmdTest(suite *PreparedBackupExchangeE2ESuite, category p
 		i++
 	}
 
-	// At least the prefix of the path should be encoded as folders.
-	assert.Greater(t, foundFolders, 4)
+	// We only backup the default folder for each category so there should be at
+	// least that folder (we don't make details entries for prefix folders).
+	assert.GreaterOrEqual(t, foundFolders, 1)
 }
 
 // ---------------------------------------------------------------------------
@@ -166,6 +166,7 @@ func (med *Data) ToReader() io.ReadCloser {
 func (med *Data) Info() details.ItemInfo {
 	return details.ItemInfo{
 		Exchange: &details.ExchangeInfo{
+			ItemType: details.ExchangeMail,
 			Sender:   "foo@bar.com",
 			Subject:  "Hello world!",
 			Received: time.Now(),
@@ -442,15 +442,13 @@ func restoreCollection(
 			continue
 		}
 
-		var locationRef string
+		locationRef := &path.Builder{}
 		if category == path.ContactsCategory {
-			locationRef = itemPath.Folder(false)
+			locationRef = locationRef.Append(itemPath.Folders()...)
 		}
 
 		err = deets.Add(
-			itemPath.String(),
-			itemPath.ShortRef(),
-			"",
+			itemPath,
 			locationRef,
 			true,
 			details.ItemInfo{
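The switch from a string to a `*path.Builder` means an "unset" location is now the zero-value builder rather than `""`. A small sketch of what each branch yields, assuming `path.Builder.String()` renders an empty builder as an empty string and joins elements with `/` (folder names here are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/path"
)

func main() {
	// Non-contact categories keep the zero-value builder.
	empty := &path.Builder{}
	fmt.Printf("%q\n", empty.String()) // expected: ""

	// Contacts append the item's folder elements (hypothetical names).
	loc := empty.Append("contacts", "myContactsFolder")
	fmt.Printf("%q\n", loc.String()) // expected: "contacts/myContactsFolder"
}
```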
@@ -510,7 +510,8 @@ func runRestore(
 	assert.Equal(t, numRestoreItems, status.Successes, "restored status.Successes")
 	assert.Len(
 		t,
-		deets.Entries,
+		// Don't check folders as those are now added to details.
+		deets.Items(),
 		numRestoreItems,
 		"details entries contains same item count as total successful items restored")
 
@@ -1085,8 +1086,10 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames
 			// Always just 1 because it's just 1 collection.
 			assert.Equal(t, totalItems, status.Objects, "status.Objects")
 			assert.Equal(t, totalItems, status.Successes, "status.Successes")
-			assert.Equal(
-				t, totalItems, len(deets.Entries),
+			assert.Len(
+				t,
+				deets.Items(),
+				totalItems,
 				"details entries contains same item count as total successful items restored")
 
 			t.Log("Restore complete")
@@ -249,10 +249,8 @@ func RestoreCollection(
 		}
 
 		err = deets.Add(
-			itemPath.String(),
-			itemPath.ShortRef(),
-			"",
-			"", // TODO: implement locationRef
+			itemPath,
+			&path.Builder{}, // TODO: implement locationRef
 			true,
 			itemInfo)
 		if err != nil {
@@ -264,10 +264,8 @@ func RestoreListCollection(
 		}
 
 		err = deets.Add(
-			itemPath.String(),
-			itemPath.ShortRef(),
-			"",
-			"", // TODO: implement locationRef
+			itemPath,
+			&path.Builder{}, // TODO: implement locationRef
 			true,
 			itemInfo)
 		if err != nil {
@@ -354,10 +352,8 @@ func RestorePageCollection(
 		}
 
 		err = deets.Add(
-			itemPath.String(),
-			itemPath.ShortRef(),
-			"",
-			"", // TODO: implement locationRef
+			itemPath,
+			&path.Builder{}, // TODO: implement locationRef
 			true,
 			itemInfo)
 		if err != nil {
src/internal/data/mock/collection.go (new file, 54 lines)
@@ -0,0 +1,54 @@
+package mock
+
+import (
+	"io"
+	"time"
+
+	"github.com/alcionai/corso/src/pkg/backup/details"
+)
+
+type Stream struct {
+	ID           string
+	Reader       io.ReadCloser
+	ReadErr      error
+	ItemSize     int64
+	ModifiedTime time.Time
+	DeletedFlag  bool
+	ItemInfo     details.ItemInfo
+}
+
+func (s *Stream) UUID() string {
+	return s.ID
+}
+
+func (s Stream) Deleted() bool {
+	return s.DeletedFlag
+}
+
+func (s *Stream) ToReader() io.ReadCloser {
+	if s.ReadErr != nil {
+		return io.NopCloser(errReader{s.ReadErr})
+	}
+
+	return s.Reader
+}
+
+func (s *Stream) Info() details.ItemInfo {
+	return s.ItemInfo
+}
+
+func (s *Stream) Size() int64 {
+	return s.ItemSize
+}
+
+func (s *Stream) ModTime() time.Time {
+	return s.ModifiedTime
+}
+
+type errReader struct {
+	readErr error
+}
+
+func (er errReader) Read([]byte) (int, error) {
+	return 0, er.readErr
+}
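This mock is exercised by the kopia tests later in the diff. A representative constructor, adapted from those tests (the helper name is ours, not the package's; the drive identifiers are the tests' fixed fixtures):

```go
package example

import (
	"bytes"
	"io"

	"github.com/alcionai/corso/src/internal/data/mock"
	"github.com/alcionai/corso/src/pkg/backup/details"
)

// newOneDriveStream mirrors how the kopia tests below build mock.Stream values.
func newOneDriveStream(name string) *mock.Stream {
	info := details.OneDriveInfo{
		ItemType:  details.OneDriveItem,
		DriveID:   "drive-id",
		DriveName: "drive-name",
		ItemName:  name,
	}

	return &mock.Stream{
		ID:       name,
		Reader:   io.NopCloser(&bytes.Buffer{}),
		ItemSize: 0,
		ItemInfo: details.ItemInfo{OneDrive: &info},
	}
}
```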
@@ -208,20 +208,9 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
 		return
 	}
 
-	var (
-		locationFolders string
-		parent          = d.repoPath.ToBuilder().Dir()
-	)
-
-	if d.locationPath != nil {
-		locationFolders = d.locationPath.String()
-	}
-
 	err = cp.deets.Add(
-		d.repoPath.String(),
-		d.repoPath.ShortRef(),
-		parent.ShortRef(),
-		locationFolders,
+		d.repoPath,
+		d.locationPath,
 		!d.cached,
 		*d.info)
 	if err != nil {
@@ -234,12 +223,6 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
 
 		return
 	}
-
-	folders := details.FolderEntriesForPath(parent, d.locationPath)
-	cp.deets.AddFoldersForItem(
-		folders,
-		*d.info,
-		!d.cached)
 }
 
 // Kopia interface function used as a callback when kopia finishes hashing a file.
@@ -386,7 +386,15 @@ var finishedFileTable = []struct {
 		cachedItems: func(fname string, fpath path.Path) map[string]testInfo {
 			return map[string]testInfo{
 				fname: {
-					info:       &itemDetails{info: &details.ItemInfo{}, repoPath: fpath},
+					info: &itemDetails{
+						info: &details.ItemInfo{
+							Exchange: &details.ExchangeInfo{
+								ItemType: details.ExchangeMail,
+							},
+						},
+						repoPath:     fpath,
+						locationPath: path.Builder{}.Append(fpath.Folders()...),
+					},
 					err:        nil,
 					totalBytes: 100,
 				},
@@ -394,7 +402,7 @@ var finishedFileTable = []struct {
 		},
 		expectedBytes: 100,
 		// 1 file and 5 folders.
-		expectedNumEntries: 6,
+		expectedNumEntries: 2,
 	},
 	{
 		name: "PendingNoDetails",
@@ -413,8 +421,15 @@ var finishedFileTable = []struct {
 		cachedItems: func(fname string, fpath path.Path) map[string]testInfo {
 			return map[string]testInfo{
 				fname: {
-					info: &itemDetails{info: &details.ItemInfo{}, repoPath: fpath},
-					err:  assert.AnError,
+					info: &itemDetails{
+						info: &details.ItemInfo{
+							Exchange: &details.ExchangeInfo{
+								ItemType: details.ExchangeMail,
+							},
+						},
+						repoPath: fpath,
+					},
+					err: assert.AnError,
 				},
 			}
 		},
@@ -521,71 +536,6 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() {
 	assert.Error(t, cp.errs.Failure(), clues.ToCore(cp.errs.Failure()))
 }
 
-func (suite *CorsoProgressUnitSuite) TestFinishedFileBuildsHierarchyNewItem() {
-	t := suite.T()
-	// Order of folders in hierarchy from root to leaf (excluding the item).
-	expectedFolderOrder := suite.targetFilePath.ToBuilder().Dir().Elements()
-
-	// Setup stuff.
-	bd := &details.Builder{}
-	cp := corsoProgress{
-		UploadProgress: &snapshotfs.NullUploadProgress{},
-		deets:          bd,
-		pending:        map[string]*itemDetails{},
-		toMerge:        newMergeDetails(),
-		errs:           fault.New(true),
-	}
-
-	deets := &itemDetails{info: &details.ItemInfo{}, repoPath: suite.targetFilePath}
-	cp.put(suite.targetFileName, deets)
-	require.Len(t, cp.pending, 1)
-
-	cp.FinishedFile(suite.targetFileName, nil)
-
-	assert.Equal(t, 0, cp.toMerge.ItemsToMerge())
-
-	// Gather information about the current state.
-	var (
-		curRef     *details.DetailsEntry
-		refToEntry = map[string]*details.DetailsEntry{}
-	)
-
-	entries := bd.Details().Entries
-
-	for i := 0; i < len(entries); i++ {
-		e := &entries[i]
-		if e.Folder == nil {
-			continue
-		}
-
-		refToEntry[e.ShortRef] = e
-
-		if e.Folder.DisplayName == expectedFolderOrder[len(expectedFolderOrder)-1] {
-			curRef = e
-		}
-	}
-
-	// Actual tests start here.
-	var rootRef *details.DetailsEntry
-
-	// Traverse the details entries from leaf to root, following the ParentRef
-	// fields. At the end rootRef should point to the root of the path.
-	for i := len(expectedFolderOrder) - 1; i >= 0; i-- {
-		name := expectedFolderOrder[i]
-
-		require.NotNil(t, curRef)
-		assert.Equal(t, name, curRef.Folder.DisplayName)
-
-		rootRef = curRef
-		curRef = refToEntry[curRef.ParentRef]
-	}
-
-	// Hierarchy root's ParentRef = "" and map will return nil.
-	assert.Nil(t, curRef)
-	require.NotNil(t, rootRef)
-	assert.Empty(t, rootRef.ParentRef)
-}
-
 func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarchy() {
 	type expectedRef struct {
 		oldRef *path.Builder
@@ -20,7 +20,9 @@ import (
 	exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
 	"github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
 	"github.com/alcionai/corso/src/internal/data"
+	"github.com/alcionai/corso/src/internal/data/mock"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"
@@ -294,12 +296,12 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
 	assert.Equal(t, 0, stats.ErrorCount)
 	assert.False(t, stats.Incomplete)
 
-	// 47 file and 6 folder entries.
+	// 47 file and 2 folder entries.
 	details := deets.Details().Entries
 	assert.Len(
 		t,
 		details,
-		test.expectedUploadedFiles+test.expectedCachedFiles+6,
+		test.expectedUploadedFiles+test.expectedCachedFiles+2,
 	)
 
 	for _, entry := range details {
@@ -331,6 +333,8 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
 	}
 }
 
+// TODO(ashmrtn): This should really be moved to an e2e test that just checks
+// details for certain things.
 func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
 	tmp, err := path.Build(
 		testTenant,
@@ -342,7 +346,14 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
 	require.NoError(suite.T(), err, clues.ToCore(err))
 
 	storePath := tmp
-	locPath := tmp
+	locPath := path.Builder{}.Append(tmp.Folders()...)
 
+	baseOneDriveItemInfo := details.OneDriveInfo{
+		ItemType:  details.OneDriveItem,
+		DriveID:   "drive-id",
+		DriveName: "drive-name",
+		ItemName:  "item",
+	}
+
 	// tags that are supplied by the caller. This includes basic tags to support
 	// lookups and extra tags the caller may want to apply.
@@ -385,13 +396,32 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
 			numDeetsEntries: 3,
 			hasMetaDeets:    true,
 			cols: func() []data.BackupCollection {
-				mc := exchMock.NewCollection(
-					storePath,
-					locPath,
-					3)
-				mc.Names[0] = testFileName
-				mc.Names[1] = testFileName + metadata.MetaFileSuffix
-				mc.Names[2] = storePath.Folders()[0] + metadata.DirMetaFileSuffix
+				streams := []data.Stream{}
+				fileNames := []string{
+					testFileName,
+					testFileName + metadata.MetaFileSuffix,
+					metadata.DirMetaFileSuffix,
+				}
+
+				for _, name := range fileNames {
+					info := baseOneDriveItemInfo
+					info.ItemName = name
+
+					ms := &mock.Stream{
+						ID:       name,
+						Reader:   io.NopCloser(&bytes.Buffer{}),
+						ItemSize: 0,
+						ItemInfo: details.ItemInfo{OneDrive: &info},
+					}
+
+					streams = append(streams, ms)
+				}
+
+				mc := &mockBackupCollection{
+					path:    storePath,
+					loc:     locPath,
+					streams: streams,
+				}
 
 				return []data.BackupCollection{mc}
 			},
@@ -404,12 +434,22 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
 			numDeetsEntries: 1,
 			hasMetaDeets:    false,
 			cols: func() []data.BackupCollection {
-				mc := exchMock.NewCollection(
-					storePath,
-					locPath,
-					1)
-				mc.Names[0] = testFileName
-				mc.ColState = data.NotMovedState
+				info := baseOneDriveItemInfo
+				info.ItemName = testFileName
+
+				ms := &mock.Stream{
+					ID:       testFileName,
+					Reader:   io.NopCloser(&bytes.Buffer{}),
+					ItemSize: 0,
+					ItemInfo: details.ItemInfo{OneDrive: &info},
+				}
+
+				mc := &mockBackupCollection{
+					path:    storePath,
+					loc:     locPath,
+					streams: []data.Stream{ms},
+					state:   data.NotMovedState,
+				}
 
 				return []data.BackupCollection{mc}
 			},
@@ -441,12 +481,12 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
 		assert.Equal(t, 0, stats.ErrorCount)
 		assert.False(t, stats.Incomplete)
 
-		// 47 file and 5 folder entries.
+		// 47 file and 1 folder entries.
 		details := deets.Details().Entries
 		assert.Len(
 			t,
 			details,
-			test.numDeetsEntries+5,
+			test.numDeetsEntries+1,
 		)
 
 		for _, entry := range details {
@@ -461,7 +501,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
 
 		// Shouldn't have any items to merge because the cached files are metadata
 		// files.
-		assert.Equal(t, 0, prevShortRefs.ItemsToMerge())
+		assert.Equal(t, 0, prevShortRefs.ItemsToMerge(), "merge items")
 
 		checkSnapshotTags(
 			t,
@@ -556,7 +596,9 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
 
 type mockBackupCollection struct {
 	path    path.Path
+	loc     *path.Builder
 	streams []data.Stream
+	state   data.CollectionState
 }
 
 func (c *mockBackupCollection) Items(context.Context, *fault.Bus) <-chan data.Stream {
@@ -581,8 +623,12 @@ func (c mockBackupCollection) PreviousPath() path.Path {
 	return nil
 }
 
+func (c mockBackupCollection) LocationPath() *path.Builder {
+	return c.loc
+}
+
 func (c mockBackupCollection) State() data.CollectionState {
-	return data.NewState
+	return c.state
 }
 
 func (c mockBackupCollection) DoNotMergeItems() bool {
@@ -592,6 +638,8 @@ func (c mockBackupCollection) DoNotMergeItems() bool {
 func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
 	t := suite.T()
 
+	loc1 := path.Builder{}.Append(suite.storePath1.Folders()...)
+	loc2 := path.Builder{}.Append(suite.storePath2.Folders()...)
 	tags := map[string]string{}
 	reason := Reason{
 		ResourceOwner: testUser,
@@ -606,6 +654,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
 	collections := []data.BackupCollection{
 		&mockBackupCollection{
 			path: suite.storePath1,
+			loc:  loc1,
 			streams: []data.Stream{
 				&exchMock.Data{
 					ID: testFileName,
@@ -619,6 +668,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
 		},
 		&mockBackupCollection{
 			path: suite.storePath2,
+			loc:  loc2,
 			streams: []data.Stream{
 				&exchMock.Data{
 					ID: testFileName3,
@@ -654,8 +704,8 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
 	assert.Equal(t, 6, stats.TotalDirectoryCount)
 	assert.Equal(t, 1, stats.IgnoredErrorCount)
 	assert.False(t, stats.Incomplete)
-	// 5 file and 6 folder entries.
-	assert.Len(t, deets.Details().Entries, 5+6)
+	// 5 file and 2 folder entries.
+	assert.Len(t, deets.Details().Entries, 5+2)
 
 	failedPath, err := suite.storePath2.Append(testFileName4, true)
 	require.NoError(t, err, clues.ToCore(err))
@@ -836,7 +886,8 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
 	collections := []data.BackupCollection{}
 
 	for _, parent := range []path.Path{suite.testPath1, suite.testPath2} {
-		collection := &mockBackupCollection{path: parent}
+		loc := path.Builder{}.Append(parent.Folders()...)
+		collection := &mockBackupCollection{path: parent, loc: loc}
 
 		for _, item := range suite.files[parent.String()] {
 			collection.streams = append(
@@ -876,8 +927,8 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
 	require.Equal(t, stats.TotalDirectoryCount, expectedDirs)
 	require.Equal(t, stats.IgnoredErrorCount, 0)
 	require.False(t, stats.Incomplete)
-	// 6 file and 6 folder entries.
-	assert.Len(t, deets.Details().Entries, expectedFiles+expectedDirs)
+	// 6 file and 2 folder entries.
+	assert.Len(t, deets.Details().Entries, expectedFiles+2)
 
 	suite.snapshotID = manifest.ID(stats.SnapshotID)
 }
@@ -670,19 +670,14 @@ func mergeDetails(
 		itemUpdated := newPath.String() != rr.String() || locUpdated
 
 		err = deets.Add(
-			newPath.String(),
-			newPath.ShortRef(),
-			newPath.ToBuilder().Dir().ShortRef(),
-			newLoc.String(),
+			newPath,
+			newLoc,
 			itemUpdated,
 			item)
 		if err != nil {
 			return clues.Wrap(err, "adding item to details")
 		}
 
-		folders := details.FolderEntriesForPath(newPath.ToBuilder().Dir(), newLoc)
-		deets.AddFoldersForItem(folders, item, itemUpdated)
-
 		// Track how many entries we added so that we know if we got them all when
 		// we're done.
 		addedEntries++
@@ -267,9 +267,10 @@ func makeMetadataPath(
 
 func makeFolderEntry(
 	t *testing.T,
-	pb *path.Builder,
+	pb, loc *path.Builder,
 	size int64,
 	modTime time.Time,
+	dt details.ItemType,
 ) *details.DetailsEntry {
 	t.Helper()
 
@@ -277,13 +278,14 @@ func makeFolderEntry(
 		RepoRef:   pb.String(),
 		ShortRef:  pb.ShortRef(),
 		ParentRef: pb.Dir().ShortRef(),
-		LocationRef: pb.PopFront().PopFront().PopFront().PopFront().Dir().String(),
+		LocationRef: loc.Dir().String(),
 		ItemInfo: details.ItemInfo{
 			Folder: &details.FolderInfo{
 				ItemType:    details.FolderItem,
-				DisplayName: pb.Elements()[len(pb.Elements())-1],
+				DisplayName: pb.LastElem(),
 				Modified:    modTime,
 				Size:        size,
+				DataType:    dt,
 			},
 		},
 	}
@@ -347,6 +349,7 @@ func makeDetailsEntry(
 			ParentPath: l.PopFront().String(),
 			Size:       int64(size),
 			DriveID:    "drive-id",
+			DriveName:  "drive-name",
 		}
 
 	default:
@@ -1210,6 +1213,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
 			ro,
 			path.EmailCategory.String(),
 			"work",
+			"project8",
 			"item1",
 		}
 
@@ -1218,7 +1222,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
 			pathElems,
 			true)
 
-		locPath1 = path.Builder{}.Append(pathElems[:len(pathElems)-1]...)
+		locPath1 = path.Builder{}.Append(itemPath1.Folders()...)
 
 		backup1 = backup.Backup{
 			BaseModel: model.BaseModel{
@@ -1270,12 +1274,15 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
 		// update the details
 		itemDetails.Exchange.Modified = now
 
-		for i := 1; i < len(pathElems); i++ {
+		for i := 1; i <= len(locPath1.Elements()); i++ {
 			expectedEntries = append(expectedEntries, *makeFolderEntry(
 				t,
-				path.Builder{}.Append(pathElems[:i]...),
+				// Include prefix elements in the RepoRef calculations.
+				path.Builder{}.Append(pathElems[:4+i]...),
+				path.Builder{}.Append(locPath1.Elements()[:i]...),
 				int64(itemSize),
-				itemDetails.Exchange.Modified))
+				itemDetails.Exchange.Modified,
+				details.ExchangeMail))
 		}
 
 		ctx, flush := tester.NewContext()
@@ -1302,7 +1309,10 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
 // compares two details slices. Useful for tests where serializing the
 // entries can produce minor variations in the time struct, causing
 // assert.elementsMatch to fail.
-func compareDeetEntries(t *testing.T, expect, result []details.DetailsEntry) {
+func compareDeetEntries(
+	t *testing.T,
+	expect, result []details.DetailsEntry,
+) {
 	if !assert.Equal(t, len(expect), len(result), "entry slices should be equal len") {
 		require.ElementsMatch(t, expect, result)
 	}
 
@@ -463,7 +463,7 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run() {
 	require.NotEmpty(t, ro.Results, "restoreOp results")
 	require.NotNil(t, ds, "restored details")
 	assert.Equal(t, ro.Status, Completed, "restoreOp status")
-	assert.Equal(t, ro.Results.ItemsWritten, len(ds.Entries), "item write count matches len details")
+	assert.Equal(t, ro.Results.ItemsWritten, len(ds.Items()), "item write count matches len details")
 	assert.Less(t, 0, ro.Results.ItemsRead, "restore items read")
 	assert.Less(t, int64(0), ro.Results.BytesRead, "bytes read")
 	assert.Equal(t, 1, ro.Results.ResourceOwners, "resource Owners")
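Several assertions in this PR move from `len(ds.Entries)` to `len(ds.Items())`: `Entries` now always contains the generated folder entries, so counts against written items must filter to items only. Per the earlier test comment ("Don't check folders as those are now added to details"), the relationship the updated assertions rely on is roughly the following fragment, with `Items()` assumed to return the non-folder subset of `Entries`:

```go
// Inside a test like the one above.
itemsOnly := ds.Items()  // item entries only
allEntries := ds.Entries // item entries + generated folder entries

assert.Equal(t, ro.Results.ItemsWritten, len(itemsOnly))
assert.GreaterOrEqual(t, len(allEntries), len(itemsOnly))
```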
@@ -64,6 +64,11 @@ func (suite *StreamStoreIntgSuite) TearDownSubTest() {
 }
 
 func (suite *StreamStoreIntgSuite) TestStreamer() {
+	deetsPath, err := path.FromDataLayerPath("tenant-id/exchange/user-id/email/Inbox/folder1/foo", true)
+	require.NoError(suite.T(), err, clues.ToCore(err))
+
+	locPath := path.Builder{}.Append(deetsPath.Folders()...)
+
 	table := []struct {
 		name  string
 		deets func(*testing.T) *details.Details
@@ -81,12 +86,15 @@ func (suite *StreamStoreIntgSuite) TestStreamer() {
 			deets: func(t *testing.T) *details.Details {
 				deetsBuilder := &details.Builder{}
 				require.NoError(t, deetsBuilder.Add(
-					"rr", "sr", "pr", "lr",
+					deetsPath,
+					locPath,
 					true,
 					details.ItemInfo{
-						Exchange: &details.ExchangeInfo{Subject: "hello world"},
+						Exchange: &details.ExchangeInfo{
+							ItemType: details.ExchangeMail,
+							Subject:  "hello world",
+						},
 					}))
 
 				return deetsBuilder.Details()
 			},
 			errs: func() *fault.Errors { return nil },
@@ -112,10 +120,14 @@ func (suite *StreamStoreIntgSuite) TestStreamer() {
 			deets: func(t *testing.T) *details.Details {
 				deetsBuilder := &details.Builder{}
 				require.NoError(t, deetsBuilder.Add(
-					"rr", "sr", "pr", "lr",
+					deetsPath,
+					locPath,
 					true,
 					details.ItemInfo{
-						Exchange: &details.ExchangeInfo{Subject: "hello world"},
+						Exchange: &details.ExchangeInfo{
+							ItemType: details.ExchangeMail,
+							Subject:  "hello world",
+						},
 					}))
 
 				return deetsBuilder.Details()
@@ -10,6 +10,7 @@ import (
 
 	"github.com/alcionai/clues"
 	"github.com/dustin/go-humanize"
+	"golang.org/x/exp/maps"
 
 	"github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/internal/common"
@@ -40,6 +41,35 @@ func (ul uniqueLoc) InDetails() *path.Builder {
 	return path.Builder{}.Append(ul.pb.Elements()[ul.prefixElems:]...)
 }
 
+// elementCount returns the number of non-prefix elements in the LocationIDer
+// (i.e. the number of elements in the InDetails path.Builder).
+func (ul uniqueLoc) elementCount() int {
+	res := len(ul.pb.Elements()) - ul.prefixElems
+	if res < 0 {
+		res = 0
+	}
+
+	return res
+}
+
+func (ul *uniqueLoc) dir() {
+	if ul.elementCount() == 0 {
+		return
+	}
+
+	ul.pb = ul.pb.Dir()
+}
+
+// lastElem returns the unescaped last element in the location. If the location
+// is empty returns an empty string.
+func (ul uniqueLoc) lastElem() string {
+	if ul.elementCount() == 0 {
+		return ""
+	}
+
+	return ul.pb.LastElem()
+}
+
 // Having service-specific constructors can be kind of clunky, but in this case
 // I think they'd be useful to ensure the proper args are used since this
 // path.Builder is used as a key in some maps.
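The builder's first `prefixElems` elements are lookup prefixes (category or drive markers) that never show up in the details display path. A standalone sketch of the same arithmetic, with hypothetical element values; this mirrors the unexported helpers above and is not the package's actual API:

```go
package example

type loc struct {
	elems       []string
	prefixElems int
}

// elementCount mirrors uniqueLoc.elementCount: only non-prefix elements count.
func (l loc) elementCount() int {
	if n := len(l.elems) - l.prefixElems; n > 0 {
		return n
	}

	return 0
}

// lastElem mirrors uniqueLoc.lastElem: empty locations yield "".
func (l loc) lastElem() string {
	if l.elementCount() == 0 {
		return ""
	}

	return l.elems[len(l.elems)-1]
}

// loc{elems: []string{"exchange", "email", "Inbox", "Archive"}, prefixElems: 2}
// yields elementCount() == 2 and lastElem() == "Archive".
```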
@@ -95,15 +125,6 @@ func NewSharePointLocationIDer(
 	}
 }
 
-type folderEntry struct {
-	RepoRef     string
-	ShortRef    string
-	ParentRef   string
-	LocationRef string
-	Updated     bool
-	Info        ItemInfo
-}
-
 // --------------------------------------------------------------------------------
 // Model
 // --------------------------------------------------------------------------------
@@ -218,19 +239,117 @@ func (de DetailsEntry) isMetaFile() bool {
 // Builder should be used to create a details model.
 type Builder struct {
 	d Details
-	mu           sync.Mutex             `json:"-"`
-	knownFolders map[string]folderEntry `json:"-"`
+	mu           sync.Mutex              `json:"-"`
+	knownFolders map[string]DetailsEntry `json:"-"`
 }
 
 func (b *Builder) Add(
-	repoRef, shortRef, parentRef, locationRef string,
+	repoRef path.Path,
+	locationRef *path.Builder,
 	updated bool,
 	info ItemInfo,
 ) error {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 
-	return b.d.add(repoRef, shortRef, parentRef, locationRef, updated, info)
+	entry, err := b.d.add(
+		repoRef,
+		locationRef,
+		updated,
+		info)
+	if err != nil {
+		return clues.Wrap(err, "adding entry to details")
+	}
+
+	if err := b.addFolderEntries(
+		repoRef.ToBuilder().Dir(),
+		locationRef,
+		entry,
+	); err != nil {
+		return clues.Wrap(err, "adding folder entries")
+	}
+
+	return nil
+}
+
+func (b *Builder) addFolderEntries(
+	repoRef, locationRef *path.Builder,
+	entry DetailsEntry,
+) error {
+	if len(repoRef.Elements()) < len(locationRef.Elements()) {
+		return clues.New("RepoRef shorter than LocationRef").
+			With("repo_ref", repoRef, "location_ref", locationRef)
+	}
+
+	if b.knownFolders == nil {
+		b.knownFolders = map[string]DetailsEntry{}
+	}
+
+	// Need a unique location because we want to have separate folders for
+	// different drives and categories even if there's duplicate folder names in
+	// them.
+	uniqueLoc, err := entry.uniqueLocation(locationRef)
+	if err != nil {
+		return clues.Wrap(err, "getting LocationIDer")
+	}
+
+	for uniqueLoc.elementCount() > 0 {
+		mapKey := uniqueLoc.ID().ShortRef()
+
+		name := uniqueLoc.lastElem()
+		if len(name) == 0 {
+			return clues.New("folder with no display name").
+				With("repo_ref", repoRef, "location_ref", uniqueLoc.InDetails())
+		}
+
+		shortRef := repoRef.ShortRef()
+		rr := repoRef.String()
+
+		// Get the parent of this entry to add as the LocationRef for the folder.
+		uniqueLoc.dir()
+
+		repoRef = repoRef.Dir()
+		parentRef := repoRef.ShortRef()
+
+		folder, ok := b.knownFolders[mapKey]
+		if !ok {
+			loc := uniqueLoc.InDetails().String()
+
+			folder = DetailsEntry{
+				RepoRef:     rr,
+				ShortRef:    shortRef,
+				ParentRef:   parentRef,
+				LocationRef: loc,
+				ItemInfo: ItemInfo{
+					Folder: &FolderInfo{
+						ItemType: FolderItem,
+						// TODO(ashmrtn): Use the item type returned by the entry once
+						// SharePoint properly sets it.
+						DisplayName: name,
+					},
+				},
+			}
+
+			if err := entry.updateFolder(folder.Folder); err != nil {
+				return clues.Wrap(err, "adding folder").
+					With("parent_repo_ref", repoRef, "location_ref", loc)
+			}
+		}
+
+		folder.Folder.Size += entry.size()
+		folder.Updated = folder.Updated || entry.Updated
+
+		itemModified := entry.Modified()
+		if folder.Folder.Modified.Before(itemModified) {
+			folder.Folder.Modified = itemModified
+		}
+
+		// Always update the map because we're storing structs not pointers to
+		// structs.
+		b.knownFolders[mapKey] = folder
+	}
+
+	return nil
+}
+
 func (b *Builder) Details() *Details {
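With folder generation folded into `Add`, a caller supplies only the repo path and location and gets the folder hierarchy for free. A usage sketch mirroring the updated repository tests near the end of this diff:

```go
package example

import (
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/path"
)

func buildDeets() (*details.Details, error) {
	repoPath, err := path.FromDataLayerPath(
		"tenant-id/exchange/user-id/email/Inbox/foo", true)
	if err != nil {
		return nil, err
	}

	// Location mirrors the repo path's folder elements ("Inbox").
	loc := path.Builder{}.Append(repoPath.Folders()...)

	builder := &details.Builder{}
	if err := builder.Add(repoPath, loc, false, details.ItemInfo{
		Exchange: &details.ExchangeInfo{ItemType: details.ExchangeMail},
	}); err != nil {
		return nil, err
	}

	// Entries now holds the item entry plus one generated folder entry per
	// location element, each tagged with DataType: details.ExchangeMail.
	return builder.Details(), nil
}
```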
@@ -238,96 +357,11 @@ func (b *Builder) Details() *Details {
 	defer b.mu.Unlock()
 
 	// Write the cached folder entries to details
-	for _, folder := range b.knownFolders {
-		b.d.addFolder(folder)
-	}
+	b.d.Entries = append(b.d.Entries, maps.Values(b.knownFolders)...)
 
 	return &b.d
 }
 
-// TODO(ashmrtn): If we never need to pre-populate the modified time of a folder
-// we should just merge this with AddFoldersForItem, have Add call
-// AddFoldersForItem, and unexport AddFoldersForItem.
-func FolderEntriesForPath(parent, location *path.Builder) []folderEntry {
-	folders := []folderEntry{}
-	lfs := location
-
-	for len(parent.Elements()) > 0 {
-		var (
-			nextParent = parent.Dir()
-			lr         string
-			dn         = parent.LastElem()
-		)
-
-		// TODO: We may have future cases where the storage hierarchy
-		// doesn't match the location hierarchy.
-		if lfs != nil {
-			lr = lfs.String()
-
-			if len(lfs.Elements()) > 0 {
-				dn = lfs.LastElem()
-			}
-		}
-
-		folders = append(folders, folderEntry{
-			RepoRef:     parent.String(),
-			ShortRef:    parent.ShortRef(),
-			ParentRef:   nextParent.ShortRef(),
-			LocationRef: lr,
-			Info: ItemInfo{
-				Folder: &FolderInfo{
-					ItemType:    FolderItem,
-					DisplayName: dn,
-				},
-			},
-		})
-
-		parent = nextParent
-
-		if lfs != nil {
-			lfs = lfs.Dir()
-		}
-	}
-
-	return folders
-}
-
-// AddFoldersForItem adds entries for the given folders. It skips adding entries that
-// have been added by previous calls.
-func (b *Builder) AddFoldersForItem(folders []folderEntry, itemInfo ItemInfo, updated bool) {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-
-	if b.knownFolders == nil {
-		b.knownFolders = map[string]folderEntry{}
-	}
-
-	for _, folder := range folders {
-		if existing, ok := b.knownFolders[folder.ShortRef]; ok {
-			// We've seen this folder before for a different item.
-			// Update the "cached" folder entry
-			folder = existing
-		}
-
-		// Update the folder's size and modified time
-		itemModified := itemInfo.Modified()
-
-		folder.Info.Folder.Size += itemInfo.size()
-
-		if folder.Info.Folder.Modified.Before(itemModified) {
-			folder.Info.Folder.Modified = itemModified
-		}
-
-		// If the item being added was "updated" - propagate that to the
-		// folder entries
-		if updated {
-			folder.Updated = true
-		}
-
-		b.knownFolders[folder.ShortRef] = folder
-	}
-}
-
 // --------------------------------------------------------------------------------
 // Details
 // --------------------------------------------------------------------------------
@@ -340,15 +374,20 @@ type Details struct {
 }
 
 func (d *Details) add(
-	repoRef, shortRef, parentRef, locationRef string,
+	repoRef path.Path,
+	locationRef *path.Builder,
 	updated bool,
 	info ItemInfo,
-) error {
+) (DetailsEntry, error) {
+	if locationRef == nil {
+		return DetailsEntry{}, clues.New("nil LocationRef").With("repo_ref", repoRef)
+	}
+
 	entry := DetailsEntry{
-		RepoRef:     repoRef,
-		ShortRef:    shortRef,
-		ParentRef:   parentRef,
-		LocationRef: locationRef,
+		RepoRef:     repoRef.String(),
+		ShortRef:    repoRef.ShortRef(),
+		ParentRef:   repoRef.ToBuilder().Dir().ShortRef(),
+		LocationRef: locationRef.String(),
 		Updated:     updated,
 		ItemInfo:    info,
 	}
@@ -356,13 +395,8 @@ func (d *Details) add(
 	// Use the item name and the path for the ShortRef. This ensures that renames
 	// within a directory generate unique ShortRefs.
 	if info.infoType() == OneDriveItem || info.infoType() == SharePointLibrary {
-		p, err := path.FromDataLayerPath(repoRef, true)
-		if err != nil {
-			return clues.Wrap(err, "munging OneDrive or SharePoint ShortRef")
-		}
-
 		if info.OneDrive == nil && info.SharePoint == nil {
-			return clues.New("item is not SharePoint or OneDrive type")
+			return entry, clues.New("item is not SharePoint or OneDrive type")
 		}
 
 		filename := ""
@@ -381,25 +415,14 @@ func (d *Details) add(
 		// M365 ID of this file and also have a subfolder in the folder with a
 		// display name that matches the file's display name. That would result in
 		// duplicate ShortRefs, which we can't allow.
-		elements := p.Elements()
-		elements = append(elements[:len(elements)-1], filename, p.Item())
+		elements := repoRef.Elements()
+		elements = append(elements[:len(elements)-1], filename, repoRef.Item())
 		entry.ShortRef = path.Builder{}.Append(elements...).ShortRef()
 	}
 
 	d.Entries = append(d.Entries, entry)
 
-	return nil
-}
-
-// addFolder adds an entry for the given folder.
-func (d *Details) addFolder(folder folderEntry) {
-	d.Entries = append(d.Entries, DetailsEntry{
-		RepoRef:   folder.RepoRef,
-		ShortRef:  folder.ShortRef,
-		ParentRef: folder.ParentRef,
-		ItemInfo:  folder.Info,
-		Updated:   folder.Updated,
-	})
+	return entry, nil
 }
 
 // Marshal complies with the marshaller interface in streamStore.
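A worked example of the rename-safe ShortRef for drive items, with hypothetical values: the display name is spliced in ahead of the immutable M365 item ID before hashing, so renaming a file changes its ShortRef even though the repo path (which ends in the ID) stays stable.

```go
package example

import "github.com/alcionai/corso/src/pkg/path"

func renameSafeShortRef() string {
	// Hypothetical drive-item repo path elements; the last is the immutable
	// M365 item ID.
	elems := []string{
		"tenant", "onedrive", "user", "files",
		"drives", "drive-id", "root:", "item-m365-id",
	}
	filename := "report.docx" // display name, e.g. OneDriveInfo.ItemName

	// Same splice as d.add above: [..., "root:", "report.docx", "item-m365-id"],
	// so the hash input changes on rename.
	hashElems := append(elems[:len(elems)-1], filename, "item-m365-id")

	return path.Builder{}.Append(hashElems...).ShortRef()
}
```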
@@ -666,7 +689,7 @@ func (i ItemInfo) Modified() time.Time {
 	return time.Time{}
 }
 
-func (i ItemInfo) uniqueLocation(baseLoc *path.Builder) (LocationIDer, error) {
+func (i ItemInfo) uniqueLocation(baseLoc *path.Builder) (*uniqueLoc, error) {
 	switch {
 	case i.Exchange != nil:
 		return i.Exchange.uniqueLocation(baseLoc)
@@ -682,11 +705,30 @@ func (i ItemInfo) uniqueLocation(baseLoc *path.Builder) (LocationIDer, error) {
 	}
 }
 
+func (i ItemInfo) updateFolder(f *FolderInfo) error {
+	switch {
+	case i.Exchange != nil:
+		return i.Exchange.updateFolder(f)
+
+	case i.OneDrive != nil:
+		return i.OneDrive.updateFolder(f)
+
+	case i.SharePoint != nil:
+		return i.SharePoint.updateFolder(f)
+
+	default:
+		return clues.New("unsupported type")
+	}
+}
+
 type FolderInfo struct {
 	ItemType    ItemType  `json:"itemType,omitempty"`
 	DisplayName string    `json:"displayName"`
 	Modified    time.Time `json:"modified,omitempty"`
 	Size        int64     `json:"size,omitempty"`
+	DataType    ItemType  `json:"dataType,omitempty"`
+	DriveName   string    `json:"driveName,omitempty"`
+	DriveID     string    `json:"driveID,omitempty"`
 }
 
 func (i FolderInfo) Headers() []string {
@@ -762,7 +804,7 @@ func (i *ExchangeInfo) UpdateParentPath(newLocPath *path.Builder) {
 	i.ParentPath = newLocPath.String()
 }
 
-func (i *ExchangeInfo) uniqueLocation(baseLoc *path.Builder) (LocationIDer, error) {
+func (i *ExchangeInfo) uniqueLocation(baseLoc *path.Builder) (*uniqueLoc, error) {
 	var category path.CategoryType
 
 	switch i.ItemType {
@@ -774,7 +816,24 @@ func (i *ExchangeInfo) uniqueLocation(baseLoc *path.Builder) (LocationIDer, error) {
 		category = path.EmailCategory
 	}
 
-	return NewExchangeLocationIDer(category, baseLoc.Elements()...)
+	loc, err := NewExchangeLocationIDer(category, baseLoc.Elements()...)
+
+	return &loc, err
+}
+
+func (i *ExchangeInfo) updateFolder(f *FolderInfo) error {
+	// Use a switch instead of a rather large if-statement. Just make sure it's an
+	// Exchange type. If it's not return an error.
+	switch i.ItemType {
+	case ExchangeContact, ExchangeEvent, ExchangeMail:
+	default:
+		return clues.New("unsupported non-Exchange ItemType").
+			With("item_type", i.ItemType)
+	}
+
+	f.DataType = i.ItemType
+
+	return nil
+}
 
 // SharePointInfo describes a sharepoint item
@@ -815,12 +874,24 @@ func (i *SharePointInfo) UpdateParentPath(newLocPath *path.Builder) {
 	i.ParentPath = newLocPath.PopFront().String()
 }
 
-func (i *SharePointInfo) uniqueLocation(baseLoc *path.Builder) (LocationIDer, error) {
+func (i *SharePointInfo) uniqueLocation(baseLoc *path.Builder) (*uniqueLoc, error) {
 	if len(i.DriveID) == 0 {
 		return nil, clues.New("empty drive ID")
 	}
 
-	return NewSharePointLocationIDer(i.DriveID, baseLoc.Elements()...), nil
+	loc := NewSharePointLocationIDer(i.DriveID, baseLoc.Elements()...)
+
+	return &loc, nil
+}
+
+func (i *SharePointInfo) updateFolder(f *FolderInfo) error {
+	// TODO(ashmrtn): Change to just SharePointLibrary when the code that
+	// generates the item type is fixed.
+	if i.ItemType == OneDriveItem || i.ItemType == SharePointLibrary {
+		return updateFolderWithinDrive(SharePointLibrary, i.DriveName, i.DriveID, f)
+	}
+
+	return clues.New("unsupported non-SharePoint ItemType").With("item_type", i.ItemType)
+}
 
 // OneDriveInfo describes a oneDrive item
@@ -860,10 +931,34 @@ func (i *OneDriveInfo) UpdateParentPath(newLocPath *path.Builder) {
 	i.ParentPath = newLocPath.PopFront().String()
 }
 
-func (i *OneDriveInfo) uniqueLocation(baseLoc *path.Builder) (LocationIDer, error) {
+func (i *OneDriveInfo) uniqueLocation(baseLoc *path.Builder) (*uniqueLoc, error) {
 	if len(i.DriveID) == 0 {
 		return nil, clues.New("empty drive ID")
 	}
 
-	return NewOneDriveLocationIDer(i.DriveID, baseLoc.Elements()...), nil
+	loc := NewOneDriveLocationIDer(i.DriveID, baseLoc.Elements()...)
+
+	return &loc, nil
+}
+
+func (i *OneDriveInfo) updateFolder(f *FolderInfo) error {
+	return updateFolderWithinDrive(OneDriveItem, i.DriveName, i.DriveID, f)
+}
+
+func updateFolderWithinDrive(
+	t ItemType,
+	driveName, driveID string,
+	f *FolderInfo,
+) error {
+	if len(driveName) == 0 {
+		return clues.New("empty drive name")
+	} else if len(driveID) == 0 {
+		return clues.New("empty drive ID")
+	}
+
+	f.DriveName = driveName
+	f.DriveID = driveID
+	f.DataType = t
+
+	return nil
+}
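This is where the PR's "top-level container information" lands: every folder entry generated beneath a drive item carries the drive's name and ID. A hedged sketch of the resulting FolderInfo for a OneDrive-backed folder (field values illustrative):

```go
package example

import "github.com/alcionai/corso/src/pkg/backup/details"

// Illustrative only: the shape of a folder entry's info once
// updateFolderWithinDrive has run for a OneDrive item.
var exampleFolder = details.FolderInfo{
	ItemType:    details.FolderItem,
	DisplayName: "Documents",          // hypothetical folder name
	DataType:    details.OneDriveItem, // set by updateFolderWithinDrive
	DriveName:   "drive-name",
	DriveID:     "drive-id",
}
```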
File diff suppressed because it is too large
@@ -20,6 +20,7 @@ import (
 	"github.com/alcionai/corso/src/pkg/backup"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/fault"
+	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/selectors"
 	"github.com/alcionai/corso/src/pkg/store"
 	"github.com/alcionai/corso/src/pkg/store/mock"
@@ -335,13 +336,18 @@ func (suite *RepositoryModelIntgSuite) TestGetBackupDetails() {
 	)
 
 	info := details.ItemInfo{
-		Folder: &details.FolderInfo{
-			DisplayName: "test",
+		Exchange: &details.ExchangeInfo{
+			ItemType: details.ExchangeMail,
 		},
 	}
 
+	repoPath, err := path.FromDataLayerPath(tenantID+"/exchange/user-id/email/test/foo", true)
+	require.NoError(suite.T(), err, clues.ToCore(err))
+
+	loc := path.Builder{}.Append(repoPath.Folders()...)
+
 	builder := &details.Builder{}
-	require.NoError(suite.T(), builder.Add("ref", "short", "pref", "lref", false, info))
+	require.NoError(suite.T(), builder.Add(repoPath, loc, false, info))
 
 	table := []struct {
 		name string
@@ -411,15 +417,19 @@ func (suite *RepositoryModelIntgSuite) TestGetBackupErrors() {
 		item = fault.FileErr(err, "file-id", "file-name", map[string]any{"foo": "bar"})
 		skip = fault.FileSkip(fault.SkipMalware, "s-file-id", "s-file-name", map[string]any{"foo": "bar"})
 		info = details.ItemInfo{
-			Folder: &details.FolderInfo{
-				DisplayName: "test",
+			Exchange: &details.ExchangeInfo{
+				ItemType: details.ExchangeMail,
 			},
 		}
 	)
 
-	builder := &details.Builder{}
+	repoPath, err2 := path.FromDataLayerPath(tenantID+"/exchange/user-id/email/test/foo", true)
+	require.NoError(suite.T(), err2, clues.ToCore(err2))
 
-	require.NoError(suite.T(), builder.Add("ref", "short", "pref", "lref", false, info))
+	loc := path.Builder{}.Append(repoPath.Folders()...)
+
+	builder := &details.Builder{}
+	require.NoError(suite.T(), builder.Add(repoPath, loc, false, info))
 
 	table := []struct {
 		name string