Validate kopia config (#5118)
Add a function that returns errors if it finds issues with common config
info in the kopia repo. The parameters currently checked are:

* kopia global policy:
  * kopia snapshot retention is disabled
  * kopia compression matches the default compression for corso
  * kopia scheduling is disabled
* object locking:
  * maintenance and blob config parameters are consistent (i.e. all
    enabled or all disabled)

Note that tests for this will fail until alcionai/clues#40 is merged and
clues is updated in corso.
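For context, a minimal sketch of how the new check might be invoked. The `connectAndVerify` wrapper below is hypothetical and not part of this diff; `verifyDefaultConfigOptions`, `clues.Stack`, and the `*conn` receiver come from the code added in the hunks further down.

```go
// Hypothetical call-site sketch (assumes it lives in the same kopia package
// as conn; this wrapper is illustrative only and not part of the PR).
func (w *conn) connectAndVerify(ctx context.Context) error {
    // verifyDefaultConfigOptions is the function added in this PR. It returns
    // nil when the kopia global policy and object-locking config match corso's
    // defaults, and a stacked clues error describing each mismatch otherwise.
    if err := w.verifyDefaultConfigOptions(ctx); err != nil {
        return clues.Stack(err)
    }

    return nil
}
```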
---
#### Does this PR need a docs update or release note?
- [ ] ✅ Yes, it's included
- [ ] 🕐 Yes, but in a later PR
- [x] ⛔ No
#### Type of change
- [x] 🌻 Feature
- [ ] 🐛 Bugfix
- [ ] 🗺️ Documentation
- [x] 🤖 Supportability/Tests
- [ ] 💻 CI/Deployment
- [ ] 🧹 Tech Debt/Cleanup
#### Issue(s)
merge after:
* #5117
* alcionai/clues#40
#### Test Plan
- [ ] 💪 Manual
- [x] ⚡ Unit test
- [ ] 💚 E2E
commit d426250931 (parent c3f4dd6bcf)
@@ -4,6 +4,7 @@ import (

    "context"
    "fmt"
    "path/filepath"
    "reflect"
    "sync"
    "time"

@@ -736,3 +737,74 @@ func (w *conn) updatePersistentConfig(

        "persisting updated config").
        OrNil()
}

func (w *conn) verifyDefaultPolicyConfigOptions(ctx context.Context) error {
    var errs *clues.Err

    globalPol, err := w.getGlobalPolicyOrEmpty(ctx)
    if err != nil {
        return clues.Stack(err)
    }

    ctx = clues.Add(ctx, "current_global_policy", globalPol.String())

    if globalPol.CompressionPolicy.CompressorName != defaultCompressor {
        errs = clues.Stack(errs, clues.NewWC(
            ctx,
            "current compressor doesn't match default").
            With("expected_compression_policy", defaultCompressor))
    }

    // Need to use deep equals because the values are pointers to optional types.
    // That makes regular equality checks fail even if the data contained in each
    // policy is the same.
    if !reflect.DeepEqual(globalPol.RetentionPolicy, defaultRetention) {
        // Unfortunately the policy has pointers to things and doesn't serialize
        // well. This makes it difficult to add the expected retention policy.
        errs = clues.Stack(errs, clues.NewWC(
            ctx,
            "current snapshot retention policy doesn't match default"))
    }

    if globalPol.SchedulingPolicy.Interval() != defaultSchedulingInterval {
        errs = clues.Stack(errs, clues.NewWC(
            ctx,
            "current scheduling policy doesn't match default").
            With(
                "expected_scheduling_policy", defaultSchedulingInterval))
    }

    return errs.OrNil()
}

func (w *conn) verifyRetentionConfig(ctx context.Context) error {
    directRepo, ok := w.Repository.(repo.DirectRepository)
    if !ok {
        return clues.NewWC(ctx, "getting repo handle")
    }

    blobConfig, maintenanceParams, err := getRetentionConfigs(ctx, directRepo)
    if err != nil {
        return clues.Stack(err)
    }

    return clues.Stack(retention.OptsFromConfigs(
        *blobConfig,
        *maintenanceParams).Verify(ctx)).OrNil()
}

// verifyDefaultConfigOptions checks the following configurations:
// kopia global policy:
//   - kopia snapshot retention is disabled
//   - kopia compression matches the default compression for corso
//   - kopia scheduling is disabled
//
// object locking:
//   - maintenance and blob config blob parameters are consistent (i.e. all
//     enabled or all disabled)
func (w *conn) verifyDefaultConfigOptions(ctx context.Context) error {
    errs := clues.Stack(w.verifyDefaultPolicyConfigOptions(ctx))
    errs = clues.Stack(errs, w.verifyRetentionConfig(ctx))

    return errs.OrNil()
}
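The helpers above lean on clues' nil-tolerant error stacking: stacking onto a nil `*clues.Err` is a no-op and `OrNil()` collapses an empty stack back to `nil` (the behavior the description notes depends on alcionai/clues#40 landing and clues being updated in corso). A standalone sketch of that aggregation pattern, using only calls that appear in this diff and assuming the module path `github.com/alcionai/clues`:

```go
package main

import (
    "context"
    "fmt"

    "github.com/alcionai/clues"
)

// aggregate mirrors the pattern in verifyDefaultPolicyConfigOptions above:
// errs stays nil until the first failed check, and OrNil() returns nil when
// every check passed.
func aggregate(ctx context.Context, checks ...func(context.Context) error) error {
    var errs *clues.Err

    for _, check := range checks {
        if err := check(ctx); err != nil {
            errs = clues.Stack(errs, err)
        }
    }

    return errs.OrNil()
}

func main() {
    err := aggregate(
        context.Background(),
        func(context.Context) error { return nil },
        func(ctx context.Context) error { return clues.NewWC(ctx, "example failure") },
    )

    // Prints the stacked failure; prints <nil> when all checks pass.
    fmt.Println(err)
}
```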
@@ -779,3 +779,239 @@ func (suite *ConnRetentionIntegrationSuite) TestInitWithAndWithoutRetention() {

    // Some checks to make sure retention was fully initialized as expected.
    checkRetentionParams(t, ctx, k2, blob.Governance, time.Hour*48, assert.True)
}

// TestVerifyDefaultConfigOptions checks that if the repo has misconfigured
// values an error is returned. This is easiest to do in a test suite that
// allows object locking because some of the configured values that are checked
// relate to object locking.
func (suite *ConnRetentionIntegrationSuite) TestVerifyDefaultConfigOptions() {
    nonzeroOpt := policy.OptionalInt(42)

    table := []struct {
        name      string
        setupRepo func(context.Context, *testing.T, *conn)
        expectErr assert.ErrorAssertionFunc
    }{
        {
            name:      "ValidConfigs NoRetention",
            setupRepo: func(context.Context, *testing.T, *conn) {},
            expectErr: assert.NoError,
        },
        {
            name: "ValidConfigs Retention",
            setupRepo: func(ctx context.Context, t *testing.T, con *conn) {
                err := con.setRetentionParameters(
                    ctx,
                    repository.Retention{
                        Mode:     ptr.To(repository.GovernanceRetention),
                        Duration: ptr.To(48 * time.Hour),
                        Extend:   ptr.To(true),
                    })
                require.NoError(t, err, clues.ToCore(err))
            },
            expectErr: assert.NoError,
        },
        {
            name: "ValidRetentionButNotExtending",
            setupRepo: func(ctx context.Context, t *testing.T, con *conn) {
                err := con.setRetentionParameters(
                    ctx,
                    repository.Retention{
                        Mode:     ptr.To(repository.GovernanceRetention),
                        Duration: ptr.To(48 * time.Hour),
                        Extend:   ptr.To(false),
                    })
                require.NoError(t, err, clues.ToCore(err))
            },
            expectErr: assert.Error,
        },
        {
            name: "ExtendingRetentionButNotConfigured",
            setupRepo: func(ctx context.Context, t *testing.T, con *conn) {
                err := con.setRetentionParameters(
                    ctx,
                    repository.Retention{
                        Extend: ptr.To(true),
                    })
                require.NoError(t, err, clues.ToCore(err))
            },
            expectErr: assert.Error,
        },
        {
            name: "NonZeroScheduleInterval",
            setupRepo: func(ctx context.Context, t *testing.T, con *conn) {
                pol, err := con.getGlobalPolicyOrEmpty(ctx)
                require.NoError(t, err, clues.ToCore(err))

                updateSchedulingOnPolicy(time.Hour, pol)

                err = con.writeGlobalPolicy(ctx, "test", pol)
                require.NoError(t, err, clues.ToCore(err))
            },
            expectErr: assert.Error,
        },
        {
            name: "NonDefaultCompression",
            setupRepo: func(ctx context.Context, t *testing.T, con *conn) {
                pol, err := con.getGlobalPolicyOrEmpty(ctx)
                require.NoError(t, err, clues.ToCore(err))

                _, err = updateCompressionOnPolicy("pgzip-best-speed", pol)
                require.NoError(t, err, clues.ToCore(err))

                err = con.writeGlobalPolicy(ctx, "test", pol)
                require.NoError(t, err, clues.ToCore(err))
            },
            expectErr: assert.Error,
        },
        {
            name: "NonZeroSnapshotRetentionLatest",
            setupRepo: func(ctx context.Context, t *testing.T, con *conn) {
                retention := policy.RetentionPolicy{
                    KeepLatest:  &nonzeroOpt,
                    KeepHourly:  &zeroOpt,
                    KeepWeekly:  &zeroOpt,
                    KeepDaily:   &zeroOpt,
                    KeepMonthly: &zeroOpt,
                    KeepAnnual:  &zeroOpt,
                }
                pol, err := con.getGlobalPolicyOrEmpty(ctx)
                require.NoError(t, err, clues.ToCore(err))

                updateRetentionOnPolicy(retention, pol)

                err = con.writeGlobalPolicy(ctx, "test", pol)
                require.NoError(t, err, clues.ToCore(err))
            },
            expectErr: assert.Error,
        },
        {
            name: "NonZeroSnapshotRetentionHourly",
            setupRepo: func(ctx context.Context, t *testing.T, con *conn) {
                retention := policy.RetentionPolicy{
                    KeepLatest:  &zeroOpt,
                    KeepHourly:  &nonzeroOpt,
                    KeepWeekly:  &zeroOpt,
                    KeepDaily:   &zeroOpt,
                    KeepMonthly: &zeroOpt,
                    KeepAnnual:  &zeroOpt,
                }
                pol, err := con.getGlobalPolicyOrEmpty(ctx)
                require.NoError(t, err, clues.ToCore(err))

                updateRetentionOnPolicy(retention, pol)

                err = con.writeGlobalPolicy(ctx, "test", pol)
                require.NoError(t, err, clues.ToCore(err))
            },
            expectErr: assert.Error,
        },
        {
            name: "NonZeroSnapshotRetentionWeekly",
            setupRepo: func(ctx context.Context, t *testing.T, con *conn) {
                retention := policy.RetentionPolicy{
                    KeepLatest:  &zeroOpt,
                    KeepHourly:  &zeroOpt,
                    KeepWeekly:  &nonzeroOpt,
                    KeepDaily:   &zeroOpt,
                    KeepMonthly: &zeroOpt,
                    KeepAnnual:  &zeroOpt,
                }
                pol, err := con.getGlobalPolicyOrEmpty(ctx)
                require.NoError(t, err, clues.ToCore(err))

                updateRetentionOnPolicy(retention, pol)

                err = con.writeGlobalPolicy(ctx, "test", pol)
                require.NoError(t, err, clues.ToCore(err))
            },
            expectErr: assert.Error,
        },
        {
            name: "NonZeroSnapshotRetentionDaily",
            setupRepo: func(ctx context.Context, t *testing.T, con *conn) {
                retention := policy.RetentionPolicy{
                    KeepLatest:  &zeroOpt,
                    KeepHourly:  &zeroOpt,
                    KeepWeekly:  &zeroOpt,
                    KeepDaily:   &nonzeroOpt,
                    KeepMonthly: &zeroOpt,
                    KeepAnnual:  &zeroOpt,
                }
                pol, err := con.getGlobalPolicyOrEmpty(ctx)
                require.NoError(t, err, clues.ToCore(err))

                updateRetentionOnPolicy(retention, pol)

                err = con.writeGlobalPolicy(ctx, "test", pol)
                require.NoError(t, err, clues.ToCore(err))
            },
            expectErr: assert.Error,
        },
        {
            name: "NonZeroSnapshotRetentionMonthly",
            setupRepo: func(ctx context.Context, t *testing.T, con *conn) {
                retention := policy.RetentionPolicy{
                    KeepLatest:  &zeroOpt,
                    KeepHourly:  &zeroOpt,
                    KeepWeekly:  &zeroOpt,
                    KeepDaily:   &zeroOpt,
                    KeepMonthly: &nonzeroOpt,
                    KeepAnnual:  &zeroOpt,
                }
                pol, err := con.getGlobalPolicyOrEmpty(ctx)
                require.NoError(t, err, clues.ToCore(err))

                updateRetentionOnPolicy(retention, pol)

                err = con.writeGlobalPolicy(ctx, "test", pol)
                require.NoError(t, err, clues.ToCore(err))
            },
            expectErr: assert.Error,
        },
        {
            name: "NonZeroSnapshotRetentionAnnual",
            setupRepo: func(ctx context.Context, t *testing.T, con *conn) {
                retention := policy.RetentionPolicy{
                    KeepLatest:  &zeroOpt,
                    KeepHourly:  &zeroOpt,
                    KeepWeekly:  &zeroOpt,
                    KeepDaily:   &zeroOpt,
                    KeepMonthly: &zeroOpt,
                    KeepAnnual:  &nonzeroOpt,
                }
                pol, err := con.getGlobalPolicyOrEmpty(ctx)
                require.NoError(t, err, clues.ToCore(err))

                updateRetentionOnPolicy(retention, pol)

                err = con.writeGlobalPolicy(ctx, "test", pol)
                require.NoError(t, err, clues.ToCore(err))
            },
            expectErr: assert.Error,
        },
    }

    for _, test := range table {
        suite.Run(test.name, func() {
            t := suite.T()

            ctx, flush := tester.NewContext(t)
            t.Cleanup(flush)

            repoNameHash := strTD.NewHashForRepoConfigName()
            st1 := storeTD.NewPrefixedS3Storage(t)

            con := NewConn(st1)
            err := con.Initialize(ctx, repository.Options{}, repository.Retention{}, repoNameHash)
            require.NoError(t, err, clues.ToCore(err))

            t.Cleanup(func() { con.Close(ctx) })

            test.setupRepo(ctx, t, con)

            err = con.verifyDefaultConfigOptions(ctx)
            test.expectErr(t, err, clues.ToCore(err))
        })
    }
}