Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Behavior change: start with no arguments uses existing cluster config #7449

Merged
merged 21 commits into from
Apr 8, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 0 additions & 27 deletions cmd/minikube/cmd/flags.go

This file was deleted.

350 changes: 7 additions & 343 deletions cmd/minikube/cmd/start.go

Large diffs are not rendered by default.

587 changes: 587 additions & 0 deletions cmd/minikube/cmd/start_flags.go

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions cmd/minikube/cmd/start_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ func TestMirrorCountry(t *testing.T) {
cmd := &cobra.Command{}
viper.SetDefault(imageRepository, test.imageRepository)
viper.SetDefault(imageMirrorCountry, test.mirrorCountry)
config, _, err := generateCfgFromFlags(cmd, k8sVersion, "none")
config, _, err := generateClusterConfig(cmd, nil, k8sVersion, "none")
if err != nil {
t.Fatalf("Got unexpected error %v during config generation", err)
}
Expand Down Expand Up @@ -166,7 +166,7 @@ func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
if err := os.Setenv("HTTP_PROXY", test.proxy); err != nil {
t.Fatalf("Unexpected error setting HTTP_PROXY: %v", err)
}
config, _, err := generateCfgFromFlags(cmd, k8sVersion, "none")
config, _, err := generateClusterConfig(cmd, nil, k8sVersion, "none")
if err != nil {
t.Fatalf("Got unexpected error %v during config generation", err)
}
Expand Down
1 change: 0 additions & 1 deletion pkg/minikube/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -193,7 +193,6 @@ func (c *simpleConfigLoader) LoadConfigFromFile(profileName string, miniHome ...
}

func (c *simpleConfigLoader) WriteConfigToFile(profileName string, cc *ClusterConfig, miniHome ...string) error {
// Move to profile package
path := profileFilePath(profileName, miniHome...)
contents, err := json.MarshalIndent(cc, "", " ")
if err != nil {
Expand Down
48 changes: 22 additions & 26 deletions pkg/minikube/node/start.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import (
"net"
"os"
"os/exec"
"runtime/debug"
"strconv"
"strings"
"sync"
Expand Down Expand Up @@ -56,13 +57,7 @@ import (
"k8s.io/minikube/pkg/util/retry"
)

const (
waitTimeout = "wait-timeout"
embedCerts = "embed-certs"
keepContext = "keep-context"
imageRepository = "image-repository"
containerRuntime = "container-runtime"
)
const waitTimeout = "wait-timeout"

// Start spins up a guest and starts the kubernetes node.
func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) (*kubeconfig.Settings, error) {
Expand Down Expand Up @@ -103,7 +98,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
}

// configure the runtime (docker, containerd, crio)
cr := configureRuntimes(mRunner, cc.Driver, cc.KubernetesConfig, sv)
cr := configureRuntimes(mRunner, cc, sv)
showVersionInfo(n.KubernetesVersion, cr)

var bs bootstrapper.Bootstrapper
Expand Down Expand Up @@ -189,10 +184,11 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
}

// ConfigureRuntimes does what needs to happen to get a runtime going.
func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig, kv semver.Version) cruntime.Manager {
func configureRuntimes(runner cruntime.CommandRunner, cc config.ClusterConfig, kv semver.Version) cruntime.Manager {
co := cruntime.Config{
Type: viper.GetString(containerRuntime),
Runner: runner, ImageRepository: k8s.ImageRepository,
Type: cc.KubernetesConfig.ContainerRuntime,
Runner: runner,
ImageRepository: cc.KubernetesConfig.ImageRepository,
KubernetesVersion: kv,
}
cr, err := cruntime.New(co)
Expand All @@ -201,28 +197,29 @@ func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config
}

disableOthers := true
if driver.BareMetal(drvName) {
if driver.BareMetal(cc.Driver) {
disableOthers = false
}

// Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere.
if driver.IsVM(drvName) {
if err := cr.Preload(k8s); err != nil {
if driver.IsVM(cc.Driver) {
if err := cr.Preload(cc.KubernetesConfig); err != nil {
switch err.(type) {
case *cruntime.ErrISOFeature:
out.ErrT(out.Tip, "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'", out.V{"error": err})
default:
glog.Warningf("%s preload failed: %v, falling back to caching images", cr.Name(), err)
}

if err := machine.CacheImagesForBootstrapper(k8s.ImageRepository, k8s.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil {
if err := machine.CacheImagesForBootstrapper(cc.KubernetesConfig.ImageRepository, cc.KubernetesConfig.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil {
exit.WithError("Failed to cache images", err)
}
}
}

err = cr.Enable(disableOthers)
if err != nil {
debug.PrintStack()
exit.WithError("Failed to enable container runtime", err)
}

Expand Down Expand Up @@ -275,8 +272,8 @@ func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clu
ClientCertificate: localpath.ClientCert(cc.Name),
ClientKey: localpath.ClientKey(cc.Name),
CertificateAuthority: localpath.CACert(),
KeepContext: viper.GetBool(keepContext),
EmbedCerts: viper.GetBool(embedCerts),
KeepContext: cc.KeepContext,
EmbedCerts: cc.EmbedCerts,
}

kcs.SetPath(kubeconfig.PathFromEnv())
Expand All @@ -303,7 +300,7 @@ func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.
exit.WithError("Failed to get command runner", err)
}

ip := validateNetwork(host, runner)
ip := validateNetwork(host, runner, cfg.KubernetesConfig.ImageRepository)

// Bypass proxy for minikube's vm host ip
err = proxy.ExcludeIP(ip)
Expand Down Expand Up @@ -352,7 +349,7 @@ func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*hos
}

// validateNetwork tries to catch network problems as soon as possible
func validateNetwork(h *host.Host, r command.Runner) string {
func validateNetwork(h *host.Host, r command.Runner, imageRepository string) string {
ip, err := h.Driver.GetIP()
if err != nil {
exit.WithError("Unable to get VM IP address", err)
Expand Down Expand Up @@ -381,7 +378,7 @@ func validateNetwork(h *host.Host, r command.Runner) string {
}

// Non-blocking
go tryRegistry(r, h.Driver.DriverName())
go tryRegistry(r, h.Driver.DriverName(), imageRepository)
return ip
}

Expand Down Expand Up @@ -423,7 +420,7 @@ func trySSH(h *host.Host, ip string) {
}

// tryRegistry tries to connect to the image repository
func tryRegistry(r command.Runner, driverName string) {
func tryRegistry(r command.Runner, driverName string, imageRepository string) {
// 2 second timeout. For best results, call tryRegistry in a non-blocking manner.
opts := []string{"-sS", "-m", "2"}

Expand All @@ -432,15 +429,14 @@ func tryRegistry(r command.Runner, driverName string) {
opts = append([]string{"-x", proxy}, opts...)
}

repo := viper.GetString(imageRepository)
if repo == "" {
repo = images.DefaultKubernetesRepo
if imageRepository == "" {
imageRepository = images.DefaultKubernetesRepo
}

opts = append(opts, fmt.Sprintf("https://%s/", repo))
opts = append(opts, fmt.Sprintf("https://%s/", imageRepository))
if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil {
glog.Warningf("%s failed: %v", rr.Args, err)
out.WarningT("This {{.type}} is having trouble accessing https://{{.repository}}", out.V{"repository": repo, "type": driver.MachineType(driverName)})
out.WarningT("This {{.type}} is having trouble accessing https://{{.repository}}", out.V{"repository": imageRepository, "type": driver.MachineType(driverName)})
out.ErrT(out.Tip, "To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/")
}
}
Expand Down
41 changes: 39 additions & 2 deletions test/integration/functional_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,20 +38,24 @@ import (

"github.com/google/go-cmp/cmp"

"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/util/retry"

"github.com/elazarl/goproxy"
"github.com/hashicorp/go-retryablehttp"
"github.com/otiai10/copy"
"github.com/phayes/freeport"
"github.com/pkg/errors"
"golang.org/x/build/kubernetes/api"
"k8s.io/minikube/pkg/util/retry"
)

// validateFunc are for subtests that share a single setup
type validateFunc func(context.Context, *testing.T, string)

// used in validateStartWithProxy and validateSoftStart
var apiPortTest = 8441

// TestFunctional are functionality tests which can safely share a profile in parallel
func TestFunctional(t *testing.T) {

Expand Down Expand Up @@ -80,6 +84,7 @@ func TestFunctional(t *testing.T) {
}{
{"CopySyncFile", setupFileSync}, // Set file for the file sync test case
{"StartWithProxy", validateStartWithProxy}, // Set everything else up for success
{"SoftStart", validateSoftStart}, // do a soft start. ensure config didnt change.
{"KubeContext", validateKubeContext}, // Racy: must come immediately after "minikube start"
{"KubectlGetPods", validateKubectlGetPods}, // Make sure apiserver is up
{"CacheCmd", validateCacheCmd}, // Caches images needed for subsequent tests because of proxy
Expand Down Expand Up @@ -184,7 +189,8 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) {
}

// Use more memory so that we may reliably fit MySQL and nginx
startArgs := append([]string{"start", "-p", profile, "--wait=true"}, StartArgs()...)
// changing api server so later in soft start we verify it didn't change
startArgs := append([]string{"start", "-p", profile, fmt.Sprintf("--apiserver-port=%d", apiPortTest), "--wait=true"}, StartArgs()...)
c := exec.CommandContext(ctx, Target(), startArgs...)
env := os.Environ()
env = append(env, fmt.Sprintf("HTTP_PROXY=%s", srv.Addr))
Expand All @@ -206,6 +212,37 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) {
}
}

// validateSoftStart validates that after a cluster is already running, a bare
// "minikube start" (no extra flags) does not change the saved cluster config.
func validateSoftStart(ctx context.Context, t *testing.T, profile string) {
	start := time.Now()

	// The preceding test started the cluster with --apiserver-port=8441
	// (apiPortTest); confirm that value is in the saved profile before the
	// soft start so the after-check below is meaningful.
	beforeCfg, err := config.LoadProfile(profile)
	if err != nil {
		// Fatal, not Errorf: beforeCfg is unusable on error and the
		// dereference below would panic.
		t.Fatalf("error reading cluster config before soft start: %v", err)
	}
	if beforeCfg.Config.KubernetesConfig.NodePort != apiPortTest {
		t.Errorf("expected cluster config node port before soft start to be %d but got %d", apiPortTest, beforeCfg.Config.KubernetesConfig.NodePort)
	}

	// Soft start: only the profile name, no other arguments.
	softStartArgs := []string{"start", "-p", profile}
	c := exec.CommandContext(ctx, Target(), softStartArgs...)
	rr, err := Run(t, c)
	if err != nil {
		// Fatal: if the start itself failed, comparing configs is pointless.
		t.Fatalf("failed to soft start minikube. args %q: %v", rr.Command(), err)
	}
	t.Logf("soft start took %s for %q cluster.", time.Since(start), profile)

	afterCfg, err := config.LoadProfile(profile)
	if err != nil {
		// Fatal for the same reason as above: afterCfg would be dereferenced.
		t.Fatalf("error reading cluster config after soft start: %v", err)
	}

	if afterCfg.Config.KubernetesConfig.NodePort != apiPortTest {
		t.Errorf("expected node port in the config to be unchanged after soft start; expected node port to be %d but got %d.", apiPortTest, afterCfg.Config.KubernetesConfig.NodePort)
	}
}

// validateKubeContext asserts that kubectl is properly configured (race-condition prone!)
func validateKubeContext(ctx context.Context, t *testing.T, profile string) {
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "config", "current-context"))
Expand Down