Update backup details merge logic (#3963)

Update backup details merge logic to use assist
backup bases. As the modTime check is already in
DetailsMergeInfoer, there's not much else to do
here besides wiring things up.

Overall, this solution is an alternative to the
previous one. It works by placing all cached
items in the DetailsMergeInfoer instead of adding
them to details directly (assuming they had a
details entry).
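
As a rough sketch of that idea (the `toMergeEntry` type and helper below are illustrative stand-ins, not the actual `DetailsMergeInfoer` API):

```go
package sketch

import "time"

// toMergeEntry is an illustrative stand-in for what the details merger
// tracks per cached item: the item's new repo ref and location plus the
// modTime used to disambiguate versions across bases.
type toMergeEntry struct {
	newRepoRef  string
	newLocation string
	modTime     time.Time
}

// toMerge is keyed by the item's old ShortRef so that entries loaded from
// base backup details can be matched up during the merge pass.
type toMerge map[string]toMergeEntry

// recordCachedItem queues a cached item for details merging instead of
// materializing a details entry for it right away.
func (tm toMerge) recordCachedItem(oldShortRef string, entry toMergeEntry) {
	tm[oldShortRef] = entry
}
```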

During details merging, we can cycle through
all bases once and track only the items we've
already added to details (so we don't duplicate
anything). This works because we know precisely
which items we should be looking for.

ModTime comparisons in the DetailsMergeInfoer
ensure we get the proper version of each item's
details, as the sketch below shows.
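
Condensed, the merge pass looks roughly like this. It's a simplified sketch of `mergeItemsFromBase` in the diff further down; Reason matching, location fix-ups, and error handling are omitted, and the types are illustrative:

```go
package sketch

import "time"

// baseEntry is an illustrative stand-in for a details entry loaded from a
// base backup.
type baseEntry struct {
	shortRef string
	modTime  time.Time
}

// mergeBases walks every base exactly once (assist bases first, then merge
// bases) and adds each wanted item at most once. want maps an item's
// ShortRef to the modTime of the version cached by the new backup.
func mergeBases(bases [][]baseEntry, want map[string]time.Time) []baseEntry {
	var (
		merged      []baseEntry
		alreadySeen = map[string]struct{}{}
	)

	for _, base := range bases {
		for _, entry := range base {
			// Skip items already merged from an earlier base.
			if _, ok := alreadySeen[entry.shortRef]; ok {
				continue
			}

			// Skip items we aren't looking for, or whose modTime doesn't
			// match the version the new backup cached.
			mt, ok := want[entry.shortRef]
			if !ok || !mt.Equal(entry.modTime) {
				continue
			}

			merged = append(merged, entry)
			alreadySeen[entry.shortRef] = struct{}{}
		}
	}

	return merged
}
```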

**Note:** This requires a minor patch to how
we determine whether it's safe to persist a
backup model, because backups now won't produce
details entries for cached items until
`mergeDetails` runs.
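
The gist of that patch, as a hypothetical sketch (the `safeToPersist` name, parameters, and exact condition are assumptions for illustration, not the code in this commit):

```go
// safeToPersist is a hypothetical sketch of the relaxed check; the real one
// lives elsewhere in the operations code and may differ. Previously, cached
// files implied their details entries existed by the end of the upload; now
// those entries only appear once mergeDetails runs, so a backup whose cached
// items are still queued in the details merger must also count as
// persistable.
func safeToPersist(detailsEntries, cachedFiles, itemsPendingMerge int) bool {
	if detailsEntries > 0 {
		return true
	}

	// No materialized details yet is fine as long as every cached item is
	// waiting in the details merger.
	return cachedFiles > 0 && itemsPendingMerge >= cachedFiles
}
```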

---

#### Does this PR need a docs update or release note?

- [ ] Yes, it's included
- [ ] 🕐 Yes, but in a later PR
- [x] No

#### Type of change

- [x] 🌻 Feature
- [ ] 🐛 Bugfix
- [ ] 🗺️ Documentation
- [ ] 🤖 Supportability/Tests
- [ ] 💻 CI/Deployment
- [ ] 🧹 Tech Debt/Cleanup

#### Issue(s)

<!-- Can reference multiple issues. Use one of the following "magic words" - "closes, fixes" to auto-close the Github issue. -->
* #<issue>

#### Test Plan

- [ ] 💪 Manual
- [x] Unit test
- [ ] 💚 E2E

---

ashmrtn 2023-08-09 19:45:19 -07:00, committed by GitHub
parent 67e38faf5e
commit 9667c79481
7 changed files with 462 additions and 129 deletions

@@ -138,6 +138,9 @@ type StreamSize interface {
}
// StreamModTime is used to provide the modified time of the stream's data.
//
// If an item implements StreamModTime and StreamInfo it should return the same
// value here as in item.Info().Modified().
type StreamModTime interface {
ModTime() time.Time
}
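
For illustration, a minimal item satisfying that contract might look like the sketch below (the `cachedItem` type is hypothetical, and `StreamInfo`'s signature is assumed from the `item.Info().Modified()` reference in the comment):

```go
// cachedItem is a hypothetical type implementing both StreamModTime and
// StreamInfo. Both values come from the same source, so ModTime() always
// equals Info().Modified(), as the doc comment above requires.
type cachedItem struct {
	info details.ItemInfo
}

func (c cachedItem) Info() details.ItemInfo { return c.info }

// ModTime surfaces the same timestamp that Info().Modified() reports.
func (c cachedItem) ModTime() time.Time { return c.info.Modified() }
```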

@@ -198,10 +198,7 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
// These items were sourced from a base snapshot or were cached in kopia so we
// never had to materialize their details in-memory.
//
// TODO(ashmrtn): When we're ready to merge with cached items add cached as a
// condition here.
if d.info == nil {
if d.info == nil || d.cached {
if d.prevPath == nil {
cp.errs.AddRecoverable(cp.ctx, clues.New("item sourced from previous backup with no previous path").
With(

@@ -468,10 +468,9 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFile() {
cached: false,
},
{
name: "all cached from assist base",
cached: true,
// TODO(ashmrtn): Update to true when we add cached items to toMerge.
expectToMergeEntries: false,
name: "all cached from assist base",
cached: true,
expectToMergeEntries: true,
},
{
name: "all cached from merge base",

@@ -965,9 +965,11 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
collections: collections,
expectedUploadedFiles: 0,
expectedCachedFiles: 47,
deetsUpdated: assert.False,
hashedBytesCheck: assert.Zero,
uploadedBytes: []int64{4000, 6000},
// Entries go to details merger since cached files are merged too.
expectMerge: true,
deetsUpdated: assert.False,
hashedBytesCheck: assert.Zero,
uploadedBytes: []int64{4000, 6000},
},
{
name: "Kopia Assist And Merge No Files Changed",
@@ -999,6 +1001,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
collections: collections,
expectedUploadedFiles: 0,
expectedCachedFiles: 47,
expectMerge: true,
deetsUpdated: assert.False,
hashedBytesCheck: assert.Zero,
uploadedBytes: []int64{4000, 6000},

@@ -361,7 +361,7 @@ func (op *BackupOperation) do(
err = mergeDetails(
ctx,
detailsStore,
mans.Backups(),
mans,
toMerge,
deets,
writeStats,
@@ -596,10 +596,118 @@ func getNewPathRefs(
return newPath, newLoc, updated, nil
}
func mergeItemsFromBase(
ctx context.Context,
checkReason bool,
baseBackup kopia.BackupEntry,
detailsStore streamstore.Streamer,
dataFromBackup kopia.DetailsMergeInfoer,
deets *details.Builder,
alreadySeenItems map[string]struct{},
errs *fault.Bus,
) (int, error) {
var (
manifestAddedEntries int
totalBaseItems int
)
// Can't be in the above block else it's counted as a redeclaration.
ctx = clues.Add(ctx, "base_backup_id", baseBackup.ID)
baseDeets, err := getDetailsFromBackup(
ctx,
baseBackup.Backup,
detailsStore,
errs)
if err != nil {
return manifestAddedEntries,
clues.New("fetching base details for backup").WithClues(ctx)
}
for _, entry := range baseDeets.Items() {
// Track this here instead of calling Items() again to get the count since
// it can be a bit expensive.
totalBaseItems++
rr, err := path.FromDataLayerPath(entry.RepoRef, true)
if err != nil {
return manifestAddedEntries, clues.New("parsing base item info path").
WithClues(ctx).
With("repo_ref", path.LoggableDir(entry.RepoRef))
}
// Although this base has an entry it may not be the most recent. Check
// the reasons a snapshot was returned to ensure we only choose the recent
// entries.
//
// We only really want to do this check for merge bases though because
// kopia won't abide by reasons when determining if an item's cached. This
// leaves us in a bit of a pickle if the user has run any concurrent backups
// with overlapping reasons that then turn into assist bases, but the
// modTime check in DetailsMergeInfoer should handle that.
if checkReason && !matchesReason(baseBackup.Reasons, rr) {
continue
}
// Skip items that were already found in a previous base backup.
if _, ok := alreadySeenItems[rr.ShortRef()]; ok {
continue
}
ictx := clues.Add(ctx, "repo_ref", rr)
newPath, newLoc, locUpdated, err := getNewPathRefs(
dataFromBackup,
entry,
rr,
baseBackup.Version)
if err != nil {
return manifestAddedEntries,
clues.Wrap(err, "getting updated info for entry").WithClues(ictx)
}
// This entry isn't merged.
if newPath == nil {
continue
}
// Fixup paths in the item.
item := entry.ItemInfo
details.UpdateItem(&item, newLoc)
// TODO(ashmrtn): This can most likely be removed altogether.
itemUpdated := newPath.String() != rr.String() || locUpdated
err = deets.Add(
newPath,
newLoc,
itemUpdated,
item)
if err != nil {
return manifestAddedEntries,
clues.Wrap(err, "adding item to details").WithClues(ictx)
}
// Make sure we won't add this again in another base.
alreadySeenItems[rr.ShortRef()] = struct{}{}
// Track how many entries we added so that we know if we got them all when
// we're done.
manifestAddedEntries++
}
logger.Ctx(ctx).Infow(
"merged details with base manifest",
"count_base_item_unfiltered", totalBaseItems,
"count_base_item_added", manifestAddedEntries)
return manifestAddedEntries, nil
}
func mergeDetails(
ctx context.Context,
detailsStore streamstore.Streamer,
backups []kopia.BackupEntry,
bases kopia.BackupBases,
dataFromBackup kopia.DetailsMergeInfoer,
deets *details.Builder,
writeStats *kopia.BackupStats,
@@ -614,88 +722,68 @@ func mergeDetails(
writeStats.TotalNonMetaUploadedBytes = detailsModel.SumNonMetaFileSizes()
// Don't bother loading any of the base details if there's nothing we need to merge.
if dataFromBackup == nil || dataFromBackup.ItemsToMerge() == 0 {
if bases == nil || dataFromBackup == nil || dataFromBackup.ItemsToMerge() == 0 {
return nil
}
var addedEntries int
var (
addedEntries int
// alreadySeenEntries tracks items that we've already merged so we don't
// accidentally merge them again. This could happen if, for example, there's
// an assist backup and a merge backup that both have the same version of an
// item at the same path.
alreadySeenEntries = map[string]struct{}{}
)
for _, baseBackup := range backups {
var (
mctx = clues.Add(ctx, "base_backup_id", baseBackup.ID)
manifestAddedEntries int
)
baseDeets, err := getDetailsFromBackup(
mctx,
baseBackup.Backup,
// Merge details from assist bases first. It shouldn't technically matter
// since the DetailsMergeInfoer should take into account the modTime of items,
// but just to be on the safe side.
//
// We don't want to match entries based on Reason for assist bases because
// kopia won't abide by Reasons when determining if an item's cached. This
// leaves us in a bit of a pickle if the user has run any concurrent backups
// with overlapping Reasons that turn into assist bases, but the modTime check
// in DetailsMergeInfoer should handle that.
for _, base := range bases.AssistBackups() {
added, err := mergeItemsFromBase(
ctx,
false,
base,
detailsStore,
dataFromBackup,
deets,
alreadySeenEntries,
errs)
if err != nil {
return clues.New("fetching base details for backup")
return clues.Wrap(err, "merging assist backup base details")
}
for _, entry := range baseDeets.Items() {
rr, err := path.FromDataLayerPath(entry.RepoRef, true)
if err != nil {
return clues.New("parsing base item info path").
WithClues(mctx).
With("repo_ref", path.NewElements(entry.RepoRef))
}
addedEntries = addedEntries + added
}
// Although this base has an entry it may not be the most recent. Check
// the reasons a snapshot was returned to ensure we only choose the recent
// entries.
//
// TODO(ashmrtn): This logic will need expanded to cover entries from
// checkpoints if we start doing kopia-assisted incrementals for those.
if !matchesReason(baseBackup.Reasons, rr) {
continue
}
mctx = clues.Add(mctx, "repo_ref", rr)
newPath, newLoc, locUpdated, err := getNewPathRefs(
dataFromBackup,
entry,
rr,
baseBackup.Version)
if err != nil {
return clues.Wrap(err, "getting updated info for entry").WithClues(mctx)
}
// This entry isn't merged.
if newPath == nil {
continue
}
// Fixup paths in the item.
item := entry.ItemInfo
details.UpdateItem(&item, newLoc)
// TODO(ashmrtn): This may need updated if we start using this merge
// strategry for items that were cached in kopia.
itemUpdated := newPath.String() != rr.String() || locUpdated
err = deets.Add(
newPath,
newLoc,
itemUpdated,
item)
if err != nil {
return clues.Wrap(err, "adding item to details")
}
// Track how many entries we added so that we know if we got them all when
// we're done.
addedEntries++
manifestAddedEntries++
// Now add entries from the merge base backups. These will be things that
// weren't changed in the new backup. Items that were already added because
// they were counted as cached in an assist base backup will be skipped due to
// alreadySeenEntries.
//
// We do want to enable matching entries based on Reasons because we
// explicitly control which subtrees from the merge base backup are grafted
// onto the hierarchy for the currently running backup.
for _, base := range bases.Backups() {
added, err := mergeItemsFromBase(
ctx,
true,
base,
detailsStore,
dataFromBackup,
deets,
alreadySeenEntries,
errs)
if err != nil {
return clues.Wrap(err, "merging merge backup base details")
}
logger.Ctx(mctx).Infow(
"merged details with base manifest",
"base_item_count_unfiltered", len(baseDeets.Items()),
"base_item_count_added", manifestAddedEntries)
addedEntries = addedEntries + added
}
checkCount := dataFromBackup.ItemsToMerge()

@@ -2,6 +2,7 @@ package operations
import (
"context"
"encoding/json"
stdpath "path"
"testing"
"time"
@@ -137,9 +138,9 @@ func (mbu mockBackupConsumer) ConsumeBackupCollections(
type mockDetailsMergeInfoer struct {
repoRefs map[string]path.Path
locs map[string]*path.Builder
modTimes map[string]time.Time
}
// TODO(ashmrtn): Update this to take mod time?
func (m *mockDetailsMergeInfoer) add(oldRef, newRef path.Path, newLoc *path.Builder) {
oldPB := oldRef.ToBuilder()
// Items are indexed individually.
@@ -149,11 +150,31 @@ func (m *mockDetailsMergeInfoer) add(oldRef, newRef path.Path, newLoc *path.Buil
m.locs[oldPB.ShortRef()] = newLoc
}
func (m *mockDetailsMergeInfoer) addWithModTime(
oldRef path.Path,
modTime time.Time,
newRef path.Path,
newLoc *path.Builder,
) {
oldPB := oldRef.ToBuilder()
// Items are indexed individually.
m.repoRefs[oldPB.ShortRef()] = newRef
m.modTimes[oldPB.ShortRef()] = modTime
// Locations are indexed by directory.
m.locs[oldPB.ShortRef()] = newLoc
}
func (m *mockDetailsMergeInfoer) GetNewPathRefs(
oldRef *path.Builder,
_ time.Time,
modTime time.Time,
_ details.LocationIDer,
) (path.Path, *path.Builder, error) {
// Return no match if the modTime was set and it wasn't what was passed in.
if mt, ok := m.modTimes[oldRef.ShortRef()]; ok && !mt.Equal(modTime) {
return nil, nil, nil
}
return m.repoRefs[oldRef.ShortRef()], m.locs[oldRef.ShortRef()], nil
}
@@ -169,6 +190,7 @@ func newMockDetailsMergeInfoer() *mockDetailsMergeInfoer {
return &mockDetailsMergeInfoer{
repoRefs: map[string]path.Path{},
locs: map[string]*path.Builder{},
modTimes: map[string]time.Time{},
}
}
@@ -295,6 +317,30 @@ func makeDetailsEntry(
return res
}
func makeDetailsEntryWithModTime(
t *testing.T,
p path.Path,
l *path.Builder,
size int,
updated bool,
modTime time.Time,
) *details.Entry {
t.Helper()
res := makeDetailsEntry(t, p, l, size, updated)
switch {
case res.Exchange != nil:
res.Exchange.Modified = modTime
case res.OneDrive != nil:
res.OneDrive.Modified = modTime
case res.SharePoint != nil:
res.SharePoint.Modified = modTime
}
return res
}
// ---------------------------------------------------------------------------
// unit tests
// ---------------------------------------------------------------------------
@@ -548,6 +594,9 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
itemPath3.ResourceOwner(),
itemPath3.Service(),
itemPath3.Category())
time1 = time.Now()
time2 = time1.Add(time.Hour)
)
itemParents1, err := path.GetDriveFolderPath(itemPath1)
@@ -556,10 +605,11 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
itemParents1String := itemParents1.String()
table := []struct {
name string
populatedDetails map[string]*details.Details
inputBackups []kopia.BackupEntry
mdm *mockDetailsMergeInfoer
name string
populatedDetails map[string]*details.Details
inputBackups []kopia.BackupEntry
inputAssistBackups []kopia.BackupEntry
mdm *mockDetailsMergeInfoer
errCheck assert.ErrorAssertionFunc
expectedEntries []*details.Entry
@@ -628,39 +678,6 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
},
errCheck: assert.Error,
},
{
name: "TooManyItems",
mdm: func() *mockDetailsMergeInfoer {
res := newMockDetailsMergeInfoer()
res.add(itemPath1, itemPath1, locationPath1)
return res
}(),
inputBackups: []kopia.BackupEntry{
{
Backup: &backup1,
Reasons: []identity.Reasoner{
pathReason1,
},
},
{
Backup: &backup1,
Reasons: []identity.Reasoner{
pathReason1,
},
},
},
populatedDetails: map[string]*details.Details{
backup1.DetailsID: {
DetailsModel: details.DetailsModel{
Entries: []details.Entry{
*makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false),
},
},
},
},
errCheck: assert.Error,
},
{
name: "BadBaseRepoRef",
mdm: func() *mockDetailsMergeInfoer {
@@ -916,6 +933,210 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
makeDetailsEntry(suite.T(), itemPath3, locationPath3, 37, false),
},
},
{
name: "MergeAndAssistBases SameItems",
mdm: func() *mockDetailsMergeInfoer {
res := newMockDetailsMergeInfoer()
res.addWithModTime(itemPath1, time1, itemPath1, locationPath1)
res.addWithModTime(itemPath3, time2, itemPath3, locationPath3)
return res
}(),
inputBackups: []kopia.BackupEntry{
{
Backup: &backup1,
Reasons: []identity.Reasoner{
pathReason1,
pathReason3,
},
},
},
inputAssistBackups: []kopia.BackupEntry{
{Backup: &backup2},
},
populatedDetails: map[string]*details.Details{
backup1.DetailsID: {
DetailsModel: details.DetailsModel{
Entries: []details.Entry{
*makeDetailsEntryWithModTime(suite.T(), itemPath1, locationPath1, 42, false, time1),
*makeDetailsEntryWithModTime(suite.T(), itemPath3, locationPath3, 37, false, time2),
},
},
},
backup2.DetailsID: {
DetailsModel: details.DetailsModel{
Entries: []details.Entry{
*makeDetailsEntryWithModTime(suite.T(), itemPath1, locationPath1, 42, false, time1),
*makeDetailsEntryWithModTime(suite.T(), itemPath3, locationPath3, 37, false, time2),
},
},
},
},
errCheck: assert.NoError,
expectedEntries: []*details.Entry{
makeDetailsEntryWithModTime(suite.T(), itemPath1, locationPath1, 42, false, time1),
makeDetailsEntryWithModTime(suite.T(), itemPath3, locationPath3, 37, false, time2),
},
},
{
name: "MergeAndAssistBases AssistBaseHasNewerItems",
mdm: func() *mockDetailsMergeInfoer {
res := newMockDetailsMergeInfoer()
res.addWithModTime(itemPath1, time2, itemPath1, locationPath1)
return res
}(),
inputBackups: []kopia.BackupEntry{
{
Backup: &backup1,
Reasons: []identity.Reasoner{
pathReason1,
},
},
},
inputAssistBackups: []kopia.BackupEntry{
{Backup: &backup2},
},
populatedDetails: map[string]*details.Details{
backup1.DetailsID: {
DetailsModel: details.DetailsModel{
Entries: []details.Entry{
*makeDetailsEntryWithModTime(suite.T(), itemPath1, locationPath1, 42, false, time1),
},
},
},
backup2.DetailsID: {
DetailsModel: details.DetailsModel{
Entries: []details.Entry{
*makeDetailsEntryWithModTime(suite.T(), itemPath1, locationPath1, 84, false, time2),
},
},
},
},
errCheck: assert.NoError,
expectedEntries: []*details.Entry{
makeDetailsEntryWithModTime(suite.T(), itemPath1, locationPath1, 84, false, time2),
},
},
{
name: "AssistBases ConcurrentAssistBasesPicksMatchingVersion1",
mdm: func() *mockDetailsMergeInfoer {
res := newMockDetailsMergeInfoer()
res.addWithModTime(itemPath1, time2, itemPath1, locationPath1)
return res
}(),
inputAssistBackups: []kopia.BackupEntry{
{Backup: &backup1},
{Backup: &backup2},
},
populatedDetails: map[string]*details.Details{
backup1.DetailsID: {
DetailsModel: details.DetailsModel{
Entries: []details.Entry{
*makeDetailsEntryWithModTime(suite.T(), itemPath1, locationPath1, 42, false, time1),
},
},
},
backup2.DetailsID: {
DetailsModel: details.DetailsModel{
Entries: []details.Entry{
*makeDetailsEntryWithModTime(suite.T(), itemPath1, locationPath1, 84, false, time2),
},
},
},
},
errCheck: assert.NoError,
expectedEntries: []*details.Entry{
makeDetailsEntryWithModTime(suite.T(), itemPath1, locationPath1, 84, false, time2),
},
},
{
name: "AssistBases ConcurrentAssistBasesPicksMatchingVersion2",
mdm: func() *mockDetailsMergeInfoer {
res := newMockDetailsMergeInfoer()
res.addWithModTime(itemPath1, time1, itemPath1, locationPath1)
return res
}(),
inputAssistBackups: []kopia.BackupEntry{
{Backup: &backup1},
{Backup: &backup2},
},
populatedDetails: map[string]*details.Details{
backup1.DetailsID: {
DetailsModel: details.DetailsModel{
Entries: []details.Entry{
*makeDetailsEntryWithModTime(suite.T(), itemPath1, locationPath1, 42, false, time1),
},
},
},
backup2.DetailsID: {
DetailsModel: details.DetailsModel{
Entries: []details.Entry{
*makeDetailsEntryWithModTime(suite.T(), itemPath1, locationPath1, 84, false, time2),
},
},
},
},
errCheck: assert.NoError,
expectedEntries: []*details.Entry{
makeDetailsEntryWithModTime(suite.T(), itemPath1, locationPath1, 42, false, time1),
},
},
{
name: "AssistBases SameItemVersion",
mdm: func() *mockDetailsMergeInfoer {
res := newMockDetailsMergeInfoer()
res.addWithModTime(itemPath1, time1, itemPath1, locationPath1)
return res
}(),
inputAssistBackups: []kopia.BackupEntry{
{Backup: &backup1},
{Backup: &backup2},
},
populatedDetails: map[string]*details.Details{
backup1.DetailsID: {
DetailsModel: details.DetailsModel{
Entries: []details.Entry{
*makeDetailsEntryWithModTime(suite.T(), itemPath1, locationPath1, 42, false, time1),
},
},
},
backup2.DetailsID: {
DetailsModel: details.DetailsModel{
Entries: []details.Entry{
*makeDetailsEntryWithModTime(suite.T(), itemPath1, locationPath1, 42, false, time1),
},
},
},
},
errCheck: assert.NoError,
expectedEntries: []*details.Entry{
makeDetailsEntryWithModTime(suite.T(), itemPath1, locationPath1, 42, false, time1),
},
},
{
name: "AssistBase ItemDeleted",
mdm: func() *mockDetailsMergeInfoer {
return newMockDetailsMergeInfoer()
}(),
inputAssistBackups: []kopia.BackupEntry{
{Backup: &backup1},
},
populatedDetails: map[string]*details.Details{
backup1.DetailsID: {
DetailsModel: details.DetailsModel{
Entries: []details.Entry{
*makeDetailsEntryWithModTime(suite.T(), itemPath1, locationPath1, 42, false, time1),
},
},
},
},
errCheck: assert.NoError,
expectedEntries: []*details.Entry{},
},
}
for _, test := range table {
@@ -929,10 +1150,14 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
deets := details.Builder{}
writeStats := kopia.BackupStats{}
bb := kopia.NewMockBackupBases().
WithBackups(test.inputBackups...).
WithAssistBackups(test.inputAssistBackups...)
err := mergeDetails(
ctx,
mds,
test.inputBackups,
bb,
test.mdm,
&deets,
&writeStats,
@@ -944,11 +1169,29 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return
}
assert.ElementsMatch(t, test.expectedEntries, deets.Details().Items())
// Check the JSON output format of things because for some reason it's not
// using the proper comparison for time.Time and failing due to that.
checkJSONOutputs(t, test.expectedEntries, deets.Details().Items())
})
}
}
func checkJSONOutputs(
t *testing.T,
expected []*details.Entry,
got []*details.Entry,
) {
t.Helper()
expectedJSON, err := json.Marshal(expected)
require.NoError(t, err, "marshalling expected data")
gotJSON, err := json.Marshal(got)
require.NoError(t, err, "marshalling got data")
assert.JSONEq(t, string(expectedJSON), string(gotJSON))
}
func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolders() {
var (
t = suite.T()
@@ -1038,7 +1281,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
err := mergeDetails(
ctx,
mds,
[]kopia.BackupEntry{backup1},
kopia.NewMockBackupBases().WithBackups(backup1),
mdm,
&deets,
&writeStats,

@@ -401,7 +401,7 @@ func runDriveIncrementalTest(
},
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
nonMetaItemsWritten: 1, // the file for which permission was updated
nonMetaItemsWritten: 0, // none because the file is considered cached instead of written.
},
{
name: "remove permission from new file",
@@ -419,7 +419,7 @@ func runDriveIncrementalTest(
},
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
nonMetaItemsWritten: 1, //.data file for newitem
nonMetaItemsWritten: 0, // none because the file is considered cached instead of written.
},
{
name: "add permission to container",
@@ -518,7 +518,7 @@ func runDriveIncrementalTest(
},
itemsRead: 1, // .data file for newitem
itemsWritten: 4, // .data and .meta for newitem, .dirmeta for parent
nonMetaItemsWritten: 1, // .data file for new item
nonMetaItemsWritten: 1, // .data file for moved item
},
{
name: "boomerang a file",
@@ -550,7 +550,7 @@ func runDriveIncrementalTest(
},
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
nonMetaItemsWritten: 1, // .data file for new item
nonMetaItemsWritten: 0, // none because the file is considered cached instead of written.
},
{
name: "delete file",