Ensure calling Details() twice does not duplicate folder names (#3556)
Related to https://github.com/alcionai/corso/pull/3140. We were editing the deets in place and returning a pointer. This was fine as long as `Details()` was only called once, but https://github.com/alcionai/corso/pull/3480 introduced an additional call, which caused the `knownFolders` entries to be appended twice. Old backups will still contain the incorrect data, but an incremental backup on top of them should fix it up.

---

#### Does this PR need a docs update or release note?

- [x] ✅ Yes, it's included
- [ ] 🕐 Yes, but in a later PR
- [ ] ⛔ No

#### Type of change

<!--- Please check the type of change your PR introduces: --->

- [ ] 🌻 Feature
- [x] 🐛 Bugfix
- [ ] 🗺️ Documentation
- [ ] 🤖 Supportability/Tests
- [ ] 💻 CI/Deployment
- [ ] 🧹 Tech Debt/Cleanup

#### Issue(s)

<!-- Can reference multiple issues. Use one of the following "magic words" - "closes, fixes" to auto-close the Github issue. -->

* Fixes https://github.com/alcionai/corso/issues/3555

#### Test Plan

<!-- How will this be tested prior to merging.-->

- [ ] 💪 Manual
- [x] ⚡ Unit test
- [ ] 💚 E2E
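For context, a minimal standalone sketch of the failure mode, using simplified hypothetical types rather than the real `Builder`/`Details` structs:

```go
package main

import "fmt"

type builder struct {
	entries      []string
	knownFolders []string
}

// details mirrors the old behavior: it appends the cached folder
// entries directly into the builder's own slice before returning it,
// so every call grows the slice again.
func (b *builder) details() []string {
	b.entries = append(b.entries, b.knownFolders...)
	return b.entries
}

func main() {
	b := builder{
		entries:      []string{"item1", "item2"},
		knownFolders: []string{"folderA"},
	}

	fmt.Println(len(b.details())) // 3: item1, item2, folderA
	fmt.Println(len(b.details())) // 4: folderA has been appended twice
}
```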
commit 6b7502bcb8 (parent 54442a3ca7)
```diff
@@ -14,6 +14,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Fixed
 
 - Fix Exchange folder cache population error when parent folder isn't found.
 - Fix Exchange backup issue caused by incorrect json serialization
+- Fix issues with details model containing duplicate entry for api consumers
 
 ### Changed
 
 - Do not display all the items that we restored at the end if there are more than 15. You can override this with `--verbose`.
```
```diff
@@ -388,10 +388,17 @@ func (b *Builder) Details() *Details {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 
-	// Write the cached folder entries to details
-	b.d.Entries = append(b.d.Entries, maps.Values(b.knownFolders)...)
+	ents := make([]Entry, len(b.d.Entries))
+	copy(ents, b.d.Entries)
 
-	return &b.d
+	// Write the cached folder entries to details
+	details := &Details{
+		DetailsModel{
+			Entries: append(ents, maps.Values(b.knownFolders)...),
+		},
+	}
+
+	return details
 }
 
 // --------------------------------------------------------------------------------
```
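The `make` and `copy` pair is deliberate. Assigning `ents := b.d.Entries` directly would still share the backing array, and the subsequent `append` could write into its spare capacity, mutating `b.d.Entries` anyway. Allocating at the exact length forces `append` to allocate a fresh array, which is what the capacity assertion in the test below verifies. A small standalone sketch of that pitfall (illustrative only, not corso code):

```go
package main

import "fmt"

func main() {
	// A slice with spare capacity (len 2, cap 4).
	src := make([]int, 2, 4)
	src[0], src[1] = 1, 2

	// A plain assignment shares the backing array, so appending
	// through the alias writes into src's spare capacity.
	alias := src
	_ = append(alias, 99)
	fmt.Println(src[:cap(src)][2]) // 99 leaked into src's backing array

	// Copying into a slice of exact length leaves no spare capacity,
	// so a later append must allocate a fresh backing array.
	safe := make([]int, len(src))
	copy(safe, src)
	_ = append(safe, 99)            // src is untouched
	fmt.Println(len(src), cap(src)) // 2 4: unchanged
}
```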
```diff
@@ -1033,6 +1033,84 @@ func (suite *DetailsUnitSuite) TestBuilder_Add_cleansFileIDSuffixes() {
 	}
 }
 
+func (suite *DetailsUnitSuite) TestBuilder_DetailsNoDuplicate() {
+	var (
+		t    = suite.T()
+		b    = Builder{}
+		svc  = path.OneDriveService
+		cat  = path.FilesCategory
+		info = ItemInfo{
+			OneDrive: &OneDriveInfo{
+				ItemType:  OneDriveItem,
+				ItemName:  "in",
+				DriveName: "dn",
+				DriveID:   "d",
+			},
+		}
+
+		dataSfx    = makeItemPath(t, svc, cat, "t", "u", []string{"d", "r:", "f", "i1" + metadata.DataFileSuffix})
+		dataSfx2   = makeItemPath(t, svc, cat, "t", "u", []string{"d", "r:", "f", "i2" + metadata.DataFileSuffix})
+		dirMetaSfx = makeItemPath(t, svc, cat, "t", "u", []string{"d", "r:", "f", "i1" + metadata.DirMetaFileSuffix})
+		metaSfx    = makeItemPath(t, svc, cat, "t", "u", []string{"d", "r:", "f", "i1" + metadata.MetaFileSuffix})
+	)
+
+	// Don't need to generate folders for this entry, we just want the itemRef
+	loc := &path.Builder{}
+
+	err := b.Add(dataSfx, loc, false, info)
+	require.NoError(t, err, clues.ToCore(err))
+
+	err = b.Add(dataSfx2, loc, false, info)
+	require.NoError(t, err, clues.ToCore(err))
+
+	err = b.Add(dirMetaSfx, loc, false, info)
+	require.NoError(t, err, clues.ToCore(err))
+
+	err = b.Add(metaSfx, loc, false, info)
+	require.NoError(t, err, clues.ToCore(err))
+
+	b.knownFolders = map[string]Entry{
+		"dummy": {
+			RepoRef:     "xyz",
+			ShortRef:    "abcd",
+			ParentRef:   "1234",
+			LocationRef: "ab",
+			ItemRef:     "cd",
+			Updated:     false,
+			ItemInfo:    info,
+		},
+		"dummy2": {
+			RepoRef:     "xyz2",
+			ShortRef:    "abcd2",
+			ParentRef:   "12342",
+			LocationRef: "ab2",
+			ItemRef:     "cd2",
+			Updated:     false,
+			ItemInfo:    info,
+		},
+		"dummy3": {
+			RepoRef:     "xyz3",
+			ShortRef:    "abcd3",
+			ParentRef:   "12343",
+			LocationRef: "ab3",
+			ItemRef:     "cd3",
+			Updated:     false,
+			ItemInfo:    info,
+		},
+	}
+
+	// mark the capacity prior to calling details.
+	// if the entries slice gets modified and grows to a
+	// 5th space, then the capacity would grow as well.
+	capCheck := cap(b.d.Entries)
+
+	assert.Len(t, b.Details().Entries, 7) // 4 ents + 3 known folders
+	assert.Len(t, b.Details().Entries, 7) // possible reason for err: knownFolders got added twice
+
+	assert.Len(t, b.d.Entries, 4)               // len should not have grown
+	assert.Equal(t, capCheck, cap(b.d.Entries)) // capacity should not have grown
+}
+
 func makeItemPath(
 	t *testing.T,
 	service path.ServiceType,
```