Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Switch to per-image tunnels, CRC on tags, and progressbar for image pushes #1590

Merged
merged 23 commits into from
Apr 18, 2023
Merged
Show file tree
Hide file tree
Changes from 16 commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
8228d89
Switch to per-image tunnels and progressbar
Racer159 Apr 11, 2023
616fc5d
Merge branch 'main' into 1568-registry-hanging-investigations
Racer159 Apr 11, 2023
35ede8d
Fix connect test
Racer159 Apr 12, 2023
98cc5eb
Fix gte check
Racer159 Apr 12, 2023
d9fb4bf
Close the tunnel manually instead of defer
Racer159 Apr 12, 2023
512caf9
Merge branch 'main' into 1568-registry-hanging-investigations
Racer159 Apr 12, 2023
1a637d0
Add support for no provided tag
Racer159 Apr 12, 2023
3f30601
Merge branch 'main' into 1568-registry-hanging-investigations
Racer159 Apr 13, 2023
4814d73
Fix logic for digests and non-tagged images
Racer159 Apr 13, 2023
89ab543
Fix registry scale down behavior
Racer159 Apr 14, 2023
9d89dbb
Explain more about when state is loaded
Racer159 Apr 14, 2023
82b8a7f
Show spinners when waiting for cluster connections
Racer159 Apr 14, 2023
07f398e
Add a warning for breaking changes that may exist
Racer159 Apr 16, 2023
55bc0e0
Merge branch 'main' into 1568-registry-hanging-investigations
Racer159 Apr 16, 2023
d8aa018
Fix web ui test
Racer159 Apr 17, 2023
5132129
Merge branch 'main' into 1568-registry-hanging-investigations
Racer159 Apr 17, 2023
bbdbd8e
Address feedback
Racer159 Apr 18, 2023
a0f072c
Merge branch 'main' into 1568-registry-hanging-investigations
Racer159 Apr 18, 2023
ba39205
Fix init test
Racer159 Apr 18, 2023
8ba22dd
Resolve more test errors
Racer159 Apr 18, 2023
7b3c924
Merge branch 'main' into 1568-registry-hanging-investigations
Racer159 Apr 18, 2023
55b8bc2
Merge branch 'main' into 1568-registry-hanging-investigations
Racer159 Apr 18, 2023
6a765d6
Merge branch 'main' into 1568-registry-hanging-investigations
Racer159 Apr 18, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions packages/distros/k3s/common/zarf.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,8 @@ components:
only:
localOS: linux
description: >
*** REQUIRES ROOT ***
Install K3s, certified Kubernetes distribution built for IoT & Edge computing.
*** REQUIRES ROOT (not sudo) ***
Install K3s, a certified Kubernetes distribution built for IoT & Edge computing.
K3s provides the cluster needed for Zarf running in Appliance Mode and can also
host a low-resource Gitops Service if not using an existing Kubernetes platform.
actions:
Expand Down
6 changes: 5 additions & 1 deletion packages/zarf-registry/chart/templates/hpa.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -26,14 +26,18 @@ spec:
scaleDown:
# Use a 60 second stabilization window because Zarf will freeze scale down during deploys
stabilizationWindowSeconds: 60
# Initially disable scale down - this gets set to Min later by Zarf (src/test/e2e/20_zarf_init_test.go)
selectPolicy: Disabled
# Scale down one pod per minute
policies:
- type: Pods
value: 1
periodSeconds: 60
periodSeconds: 60
scaleUp:
# Delay initial checks by 30 seconds
stabilizationWindowSeconds: 30
# Scale up as much as is needed
selectPolicy: Max
# Scale up one pod per minute
policies:
- type: Pods
Expand Down
4 changes: 4 additions & 0 deletions src/cmd/connect.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ var (
if len(args) > 0 {
target = args[0]
}
spinner := message.NewProgressSpinner("Preparing a tunnel to connect to %s", target)

tunnel, err := cluster.NewTunnel(connectNamespace, connectResourceType, connectResourceName, connectLocalPort, connectRemotePort)
if err != nil {
Expand All @@ -38,7 +39,10 @@ var (
if !cliOnly {
tunnel.EnableAutoOpen()
}

tunnel.AddSpinner(spinner)
tunnel.Connect(target, true)
spinner.Success()
},
}

Expand Down
2 changes: 1 addition & 1 deletion src/cmd/destroy.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ var destroyCmd = &cobra.Command{
Short: lang.CmdDestroyShort,
Long: lang.CmdDestroyLong,
Run: func(cmd *cobra.Command, args []string) {
c, err := cluster.NewClusterWithWait(30 * time.Second)
c, err := cluster.NewClusterWithWait(30*time.Second, true)
if err != nil {
message.Fatalf(err, lang.ErrNoClusterConnection)
}
Expand Down
2 changes: 2 additions & 0 deletions src/cmd/package.go
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,8 @@ var packageDeployCmd = &cobra.Command{
pkgClient := packager.NewOrDie(&pkgConfig)
defer pkgClient.ClearTempPaths()

pterm.Println()

// Deploy the package
if err := pkgClient.Deploy(); err != nil {
message.Fatalf(err, "Failed to deploy package: %s", err.Error())
Expand Down
1 change: 1 addition & 0 deletions src/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -134,6 +134,7 @@ func GetCraneOptions(insecure bool, archs ...string) []crane.Option {
OS: "linux",
Architecture: GetArch(archs...),
}),
crane.WithUserAgent("zarf"),
)

return options
Expand Down
2 changes: 1 addition & 1 deletion src/internal/api/cluster/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ func Summary(w http.ResponseWriter, _ *http.Request) {
var distro string
var hasZarf bool

c, err := cluster.NewClusterWithWait(5 * time.Second)
c, err := cluster.NewClusterWithWait(5*time.Second, false)
reachable = err == nil

if reachable {
Expand Down
28 changes: 23 additions & 5 deletions src/internal/cluster/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ var labels = k8s.Labels{

// NewClusterOrDie creates a new cluster instance and waits up to 30 seconds for the cluster to be ready or throws a fatal error.
func NewClusterOrDie() *Cluster {
c, err := NewClusterWithWait(defaultTimeout)
c, err := NewClusterWithWait(defaultTimeout, true)
if err != nil {
message.Fatalf(err, "Failed to connect to cluster")
}
Expand All @@ -37,19 +37,37 @@ func NewClusterOrDie() *Cluster {
}

// NewClusterWithWait creates a new cluster instance and waits for the given timeout for the cluster to be ready.
func NewClusterWithWait(timeout time.Duration) (*Cluster, error) {
func NewClusterWithWait(timeout time.Duration, withSpinner bool) (*Cluster, error) {
var spinner *message.Spinner
if withSpinner {
spinner = message.NewProgressSpinner("Waiting for cluster connection (%s timeout)", timeout.String())
defer spinner.Stop()
}

c := &Cluster{}
var err error

c.Kube, err = k8s.New(message.Debugf, labels)
if err != nil {
return c, err
}
return c, c.Kube.WaitForHealthyCluster(timeout)

err = c.Kube.WaitForHealthyCluster(timeout)
if err != nil {
return c, err
}

if spinner != nil {
spinner.Success()
}

return c, nil
}

// NewCluster creates a new cluster instance without waiting for the cluster to be ready.
func NewCluster() (*Cluster, error) {
var err error
c := &Cluster{}
c.Kube, _ = k8s.New(message.Debugf, labels)
return c, nil
c.Kube, err = k8s.New(message.Debugf, labels)
return c, err
}
9 changes: 5 additions & 4 deletions src/internal/cluster/state.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,11 @@ import (

// Zarf Cluster Constants.
const (
ZarfNamespace = "zarf"
ZarfStateSecretName = "zarf-state"
ZarfStateDataKey = "state"
ZarfPackageInfoLabel = "package-deploy-info"
ZarfNamespace = "zarf"
ZarfStateSecretName = "zarf-state"
ZarfStateDataKey = "state"
ZarfPackageInfoLabel = "package-deploy-info"
ZarfInitPackageInfoName = "zarf-package-init"
)

// InitZarfState initializes the Zarf state with the given temporary directory and init configs.
Expand Down
14 changes: 5 additions & 9 deletions src/internal/cluster/tunnel.go
Original file line number Diff line number Diff line change
Expand Up @@ -368,7 +368,6 @@ func (tunnel *Tunnel) establish() (string, error) {
message.Debug("tunnel.Establish()")

var err error
var spinner *message.Spinner

// Track this locally as we may need to retry if the tunnel fails.
localPort := tunnel.localPort
Expand All @@ -390,6 +389,7 @@ func (tunnel *Tunnel) establish() (string, error) {
defer globalMutex.Unlock()
}

var spinner *message.Spinner
spinnerMessage := fmt.Sprintf("Opening tunnel %d -> %d for %s/%s in namespace %s",
localPort,
tunnel.remotePort,
Expand All @@ -402,8 +402,7 @@ func (tunnel *Tunnel) establish() (string, error) {
spinner = tunnel.spinner
spinner.Updatef(spinnerMessage)
} else {
spinner = message.NewProgressSpinner(spinnerMessage)
defer spinner.Stop()
message.Debug(spinnerMessage)
}

kube, err := k8s.NewWithWait(message.Debugf, labels, defaultTimeout)
Expand Down Expand Up @@ -455,19 +454,16 @@ func (tunnel *Tunnel) establish() (string, error) {
// Wait for an error or the tunnel to be ready.
select {
case err = <-errChan:
if tunnel.spinner == nil {
spinner.Stop()
}
return "", fmt.Errorf("unable to start the tunnel: %w", err)
case <-portforwarder.Ready:
// Store for endpoint output
tunnel.localPort = localPort
url := fmt.Sprintf("http://%s:%d%s", config.IPV4Localhost, localPort, tunnel.urlSuffix)
msg := fmt.Sprintf("Creating port forwarding tunnel at %s", url)
if tunnel.spinner == nil {
spinner.Successf(msg)
if tunnel.spinner != nil {
spinner.Updatef("%s", msg)
} else {
spinner.Updatef(msg)
message.Debug(msg)
}
return url, nil
}
Expand Down
15 changes: 15 additions & 0 deletions src/internal/cluster/zarf.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,21 @@ func (c *Cluster) GetDeployedZarfPackages() ([]types.DeployedPackage, error) {
return deployedPackages, nil
}

// GetDeployedZarfInitPackage gets the metadata information about the currently deployed init package in the cluster.
// We determine what packages have been deployed to the cluster by looking for specific secrets in the Zarf namespace.
func (c *Cluster) GetDeployedZarfInitPackage() (types.DeployedPackage, error) {
Racer159 marked this conversation as resolved.
Show resolved Hide resolved
var deployedInitPackage = types.DeployedPackage{}

// Get the secret that describes the deployed init package
secret, err := c.Kube.GetSecret(ZarfNamespace, ZarfInitPackageInfoName)
if err != nil {
return deployedInitPackage, err
}

err = json.Unmarshal(secret.Data["data"], &deployedInitPackage)
return deployedInitPackage, err
}

// StripZarfLabelsAndSecretsFromNamespaces removes metadata and secrets from existing namespaces no longer managed by Zarf.
func (c *Cluster) StripZarfLabelsAndSecretsFromNamespaces() {
spinner := message.NewProgressSpinner("Removing zarf metadata & secrets from existing namespaces not managed by Zarf")
Expand Down
7 changes: 2 additions & 5 deletions src/internal/packager/images/pull.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,11 +48,8 @@ func (i *ImgConfig) PullAll() error {
spinner := message.NewProgressSpinner("Loading metadata for %d images. %s", imgCount, longer)
defer spinner.Stop()

if message.GetLogLevel() >= message.DebugLevel {
spinner.EnablePreserveWrites()
logs.Warn.SetOutput(spinner)
logs.Progress.SetOutput(spinner)
}
logs.Warn.SetOutput(&message.DebugWriter{})
logs.Progress.SetOutput(&message.DebugWriter{})

for idx, src := range i.ImgList {
spinner.Updatef("Fetching image metadata (%d of %d): %s", idx+1, imgCount, src)
Expand Down
Loading