<!-- PR description--> One of the 2 remaining setup PRs before we can introduce local storage repos. **Changes:** 1. Read storage provider (`provider`) from config file except for `repo init * ` or `repo connect *` commands. 2. Apply flag overrides based on provider type (e.g. `S3FlagOverrides` if provider is `S3`) 3. Propagate storage provider type to functions which read/write config. These functions arbitrate on config hierarchy - flags, env, config file, in that order. **Reasons** * Reason 1 is needed because the config file is the source of truth for storage provider for all commands except `repo init` or `repo connect`. In the exception cases, we pick the provider in the command (e.g. `s3`) as the source of truth. e.g. consider a `repo init s3`, followed by `repo init filesystem`. During `repo init filesystem`, the config file would indicate the `S3` provider, but the correct behavior here is to select the `filesystem` provider. * One alternative was to push provider from the init/connect cmds into an override flag, and let the config code decide on hierarchy. However, this felt hacky. provider here is not a flag to begin with. It's part of init/connect commands. --- #### Does this PR need a docs update or release note? - [ ] ✅ Yes, it's included - [x] 🕐 Yes, but in a later PR - [ ] ⛔ No #### Type of change <!--- Please check the type of change your PR introduces: ---> - [ ] 🌻 Feature - [ ] 🐛 Bugfix - [ ] 🗺️ Documentation - [ ] 🤖 Supportability/Tests - [ ] 💻 CI/Deployment - [x] 🧹 Tech Debt/Cleanup #### Issue(s) <!-- Can reference multiple issues. Use one of the following "magic words" - "closes, fixes" to auto-close the Github issue. --> * https://github.com/alcionai/corso/issues/1416 #### Test Plan <!-- How will this be tested prior to merging.--> - [x] 💪 Manual - [ ] ⚡ Unit test - [ ] 💚 E2E
120 lines
2.9 KiB
Go
120 lines
2.9 KiB
Go
package export
|
|
|
|
import (
|
|
"context"
|
|
"errors"
|
|
"os"
|
|
|
|
"github.com/alcionai/clues"
|
|
"github.com/spf13/cobra"
|
|
|
|
. "github.com/alcionai/corso/src/cli/print"
|
|
"github.com/alcionai/corso/src/cli/utils"
|
|
"github.com/alcionai/corso/src/internal/common/dttm"
|
|
"github.com/alcionai/corso/src/internal/data"
|
|
"github.com/alcionai/corso/src/internal/observe"
|
|
"github.com/alcionai/corso/src/pkg/control"
|
|
"github.com/alcionai/corso/src/pkg/export"
|
|
"github.com/alcionai/corso/src/pkg/selectors"
|
|
)
|
|
|
|
// exportCommands lists the registration funcs that attach each
// service-specific subcommand (onedrive, sharepoint, ...) beneath
// the top-level `corso export` command.
var exportCommands = []func(cmd *cobra.Command) *cobra.Command{
	addOneDriveCommands,
	addSharePointCommands,
	// awaiting release
	// addGroupsCommands,
	// addTeamsCommands,
}
|
|
|
|
// AddCommands attaches all `corso export * *` commands to the parent.
|
|
func AddCommands(cmd *cobra.Command) {
|
|
exportC := exportCmd()
|
|
cmd.AddCommand(exportC)
|
|
|
|
for _, addExportTo := range exportCommands {
|
|
addExportTo(exportC)
|
|
}
|
|
|
|
// delete after release
|
|
if len(os.Getenv("CORSO_ENABLE_GROUPS")) > 0 {
|
|
addGroupsCommands(exportC)
|
|
}
|
|
}
|
|
|
|
// exportCommand is the `Use` keyword for the top-level export command.
const exportCommand = "export"
|
|
|
|
// The export category of commands.
|
|
// `corso export [<subcommand>] [<flag>...]`
|
|
func exportCmd() *cobra.Command {
|
|
return &cobra.Command{
|
|
Use: exportCommand,
|
|
Short: "Export your service data",
|
|
Long: `Export the data stored in one of your M365 services.`,
|
|
RunE: handleExportCmd,
|
|
Args: cobra.NoArgs,
|
|
}
|
|
}
|
|
|
|
// Handler for flat calls to `corso export`.
// Produces the same output as `corso export --help`.
// args is unused; the signature is required by cobra's RunE contract.
func handleExportCmd(cmd *cobra.Command, args []string) error {
	return cmd.Help()
}
|
|
|
|
func runExport(
|
|
ctx context.Context,
|
|
cmd *cobra.Command,
|
|
args []string,
|
|
ueco utils.ExportCfgOpts,
|
|
sel selectors.Selector,
|
|
backupID, serviceName string,
|
|
) error {
|
|
r, _, _, _, err := utils.GetAccountAndConnectWithOverrides(
|
|
ctx,
|
|
cmd,
|
|
sel.PathService())
|
|
if err != nil {
|
|
return Only(ctx, err)
|
|
}
|
|
|
|
defer utils.CloseRepo(ctx, r)
|
|
|
|
exportLocation := args[0]
|
|
if len(exportLocation) == 0 {
|
|
// This should not be possible, but adding it just in case.
|
|
exportLocation = control.DefaultRestoreLocation + dttm.FormatNow(dttm.HumanReadableDriveItem)
|
|
}
|
|
|
|
Infof(ctx, "Exporting to folder %s", exportLocation)
|
|
|
|
eo, err := r.NewExport(
|
|
ctx,
|
|
backupID,
|
|
sel,
|
|
utils.MakeExportConfig(ctx, ueco))
|
|
if err != nil {
|
|
return Only(ctx, clues.Wrap(err, "Failed to initialize "+serviceName+" export"))
|
|
}
|
|
|
|
expColl, err := eo.Run(ctx)
|
|
if err != nil {
|
|
if errors.Is(err, data.ErrNotFound) {
|
|
return Only(ctx, clues.New("Backup or backup details missing for id "+backupID))
|
|
}
|
|
|
|
return Only(ctx, clues.Wrap(err, "Failed to run "+serviceName+" export"))
|
|
}
|
|
|
|
// It would be better to give a progressbar than a spinner, but we
|
|
// have any way of knowing how many files are available as of now.
|
|
diskWriteComplete := observe.MessageWithCompletion(ctx, "Writing data to disk")
|
|
defer close(diskWriteComplete)
|
|
|
|
err = export.ConsumeExportCollections(ctx, exportLocation, expColl, eo.Errors)
|
|
if err != nil {
|
|
return Only(ctx, err)
|
|
}
|
|
|
|
return nil
|
|
}
|