Commit
Support cluster name and remove state configuration (#19)
jonathanio authored Aug 21, 2024
2 parents f6b7f06 + 228071a commit 6349496
Showing 13 changed files with 166 additions and 316 deletions.
4 changes: 0 additions & 4 deletions charts/dashboard/README.md
@@ -51,10 +51,6 @@ configure it through the `values.yaml` file.
| deployment.revisionHistoryLimit | int | `10` | Set the number of deployments which should be kept to enable a rollback of the deployment in the event of any issues or failures |
| deployment.annotations | object | `{}` | Set any additional annotations which should be added to the Deployment resource |
| deployment.labels | object | `{}` | Set any additional labels which should be added to the Deployment resource |
| persistentVolumeClaim.create | bool | `false` | Set whether or not to create a PersistentVolumeClaim resource for the dashboard service and attach it to the Pods |
| persistentVolumeClaim.storageClassName | string | `nil` | Set the name of the StorageClass to use for the volumes in the PersistentVolumeClaim |
| persistentVolumeClaim.size | string | `"32Gi"` | Set the size of each PersistentVolumeClaim to be created |
| persistentVolumeClaim.accessModes | list | `["ReadWriteOnce"]` | Configure the access modes to be set on the PersistentVolumeClaim |
| pod.image.repository | string | `"ghcr.io/n3tuk/dashboard"` | Set the URI for the container image to be deployed for the dashboard Deployment |
| pod.image.pullPolicy | string | `"IfNotPresent"` | Set the pull policy for the host running each Pod of the deployment |
| pod.annotations | object | `{}` | Set any additional annotations which should be added to the Ingress resource |
3 changes: 1 addition & 2 deletions charts/dashboard/templates/deployment.yaml
@@ -1,5 +1,4 @@
---
{{- if (not .Values.persistentVolumeClaim.create) }}
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -60,6 +59,7 @@ spec:
- --log-level={{ .Values.pod.logging.level }}
- --log-json={{ if .Values.pod.logging.json }}true{{ else }}false{{ end }}
- --address=0.0.0.0
- --cluster-name={{ include "dashboard.fullname" . }}
- --web-port={{ .Values.service.webPort }}
- --metrics-port={{ .Values.service.metricsPort }}
{{- range .Values.pod.extraArgs }}
@@ -140,4 +140,3 @@ spec:
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
156 changes: 0 additions & 156 deletions charts/dashboard/templates/stateful-set.yaml

This file was deleted.

13 changes: 0 additions & 13 deletions charts/dashboard/values.yaml
@@ -100,19 +100,6 @@ deployment:
# resource
labels: {}

persistentVolumeClaim:
# -- Set whether or not to create a PersistentVolumeClaim resource for the
# dashboard service and attach it to the Pods
create: false
# -- (string) Set the name of the StorageClass to use for the volumes in the
# PersistentVolumeClaim
storageClassName:
# -- Set the size of each PersistentVolumeClaim to be created
size: 32Gi
# -- Configure the access modes to be set on the PersistentVolumeClaim
accessModes:
- ReadWriteOnce

pod:
image:
# -- Set the URI for the container image to be deployed for the dashboard
3 changes: 3 additions & 0 deletions config/serve.yaml
@@ -1,4 +1,7 @@
---
cluster:
name: dashboard

endpoints:
bind:
address: 0.0.0.0
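For context, here is a minimal standalone sketch (separate from this commit) of how the new `cluster.name` key resolves under viper: an explicitly-set `--cluster-name` flag, such as the one the Helm chart now passes, takes precedence over the value read from `config/serve.yaml`, which in turn takes precedence over the built-in `dashboard` default (see the binding in `internal/cmd/serve.go` below).

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

func main() {
	flags := pflag.NewFlagSet("serve", pflag.ExitOnError)
	flags.StringP("cluster-name", "n", "dashboard", "The name of the cluster")
	// Try []string{"--cluster-name", "staging"} to see the flag win.
	_ = flags.Parse([]string{})

	viper.SetDefault("cluster.name", "dashboard")
	_ = viper.BindPFlag("cluster.name", flags.Lookup("cluster-name"))

	// viper prefers an explicitly-set flag, then the config file,
	// then the registered default.
	viper.SetConfigFile("config/serve.yaml")
	_ = viper.ReadInConfig()

	fmt.Println(viper.GetString("cluster.name"))
}
```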
10 changes: 9 additions & 1 deletion internal/cmd/serve.go
@@ -27,6 +27,10 @@ const (
)

var (
// name is the default name for the cluster when one or more dashboard
// instances operate together.
name = "dashboard"

// host is the hostname or IPv4/IPv6 address to bind the service to on
// startup.
host = "localhost"
@@ -128,6 +132,10 @@ func init() {
flags.Bool("log-metrics", false, "Set whether to log metrics port requests")
_ = viper.BindPFlag("logging.metrics", flags.Lookup("log-metrics"))

viper.SetDefault("cluster.name", name)
flags.StringP("cluster-name", "n", name, "The name of the cluster")
_ = viper.BindPFlag("cluster.name", flags.Lookup("cluster-name"))

rootCmd.AddCommand(serveCmd)
}

@@ -167,7 +175,7 @@ func runServe(_ *cobra.Command, _ []string) error {
// Start the web service first as the metrics service will report the health
// of the service, so we should be ready to receive requests before the
// service is reporting as healthy
go w.Start(e)
go w.Start(e, m.SetWebHealth)
go m.Start(e)

// Restore default behaviour on the interrupt signal and notify user of shutdown.
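The `m.SetWebHealth` callback passed to `w.Start` is not shown in this diff; a plausible, purely hypothetical sketch of the wiring, assuming the metrics package owns the shared `Health` value from `healthz` below, might look like this:

```go
// Hypothetical wiring, inferred from "go w.Start(e, m.SetWebHealth)" above;
// the actual implementation in internal/serve is not part of this commit.
package metrics

import "github.com/n3tuk/dashboard/internal/serve/metrics/healthz"

// health is the shared state reported by the /healthz endpoint.
var health = healthz.NewHealth()

// SetWebHealth is handed to the web server so it can flag when it has
// started listening (true) or has failed or stopped (false).
func SetWebHealth(up bool) {
	health.Web = up
}
```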
2 changes: 2 additions & 0 deletions internal/logger/main.go
@@ -47,6 +47,8 @@ func Start(attrs *map[string]string) {
),
)
}

slog.SetLogLoggerLevel(level)
}

// getLevel retrieves the required logging level from either the defaults, the
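`slog.SetLogLoggerLevel` (Go 1.22+) controls the level at which records from the legacy `log` package are bridged into `slog` once a default logger has been set, which is why the call is added at the end of `Start`. A small illustration, separate from this commit:

```go
package main

import (
	"log"
	"log/slog"
	"os"
)

func main() {
	// Route the stdlib log package through a slog handler...
	slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr,
		&slog.HandlerOptions{Level: slog.LevelDebug})))
	// ...and record its messages at Debug rather than the default Info,
	// keeping legacy output consistent with the configured level.
	slog.SetLogLoggerLevel(slog.LevelDebug)

	log.Print("message from code still using the stdlib logger")
}
```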
75 changes: 58 additions & 17 deletions internal/serve/metrics/healthz/main.go
@@ -9,12 +9,32 @@ import (
slogg "github.com/samber/slog-gin"
)

var shuttingDown *bool
type Health struct {
Web bool
Metrics bool
Terminating bool
}

const (
healthy = "healthy"
unhealthy = "unhealthy"
terminating = "terminating"
)

var health *Health

func NewHealth() *Health {
return &Health{
Web: false,
Metrics: false,
Terminating: false,
}
}

// Attach takes a reference to the Gin engine and attaches all the expected
// endpoints which can be used by clients through this package.
func Attach(r *gin.Engine, shutdown *bool) {
shuttingDown = shutdown
func Attach(r *gin.Engine, h *Health) {
health = h

r.GET("/healthz", healthz)
}
@@ -24,19 +24,40 @@ func Attach(r *gin.Engine, shutdown *bool) {
// on their overall status, allowing the service to be marked as unhealthy and
// to stop processing further requests if there are known issues.
func healthz(c *gin.Context) {
if shuttingDown == nil || *shuttingDown {
slogg.AddCustomAttributes(c, slog.Group("healthz", slog.String("status", "not-ok")))
c.JSON(http.StatusGone, gin.H{
"status": "shutting-down",
"database": "unknown",
"queue": "unknown",
})
} else {
slogg.AddCustomAttributes(c, slog.Group("healthz", slog.String("status", "ok")))
c.JSON(http.StatusOK, gin.H{
"status": "healthy",
"database": "unknown",
"queue": "unknown",
})
code := http.StatusOK
status := healthy
web := healthy
metrics := healthy

if !health.Web {
code = http.StatusServiceUnavailable
status = unhealthy
web = unhealthy
}

if !health.Metrics {
code = http.StatusServiceUnavailable
status = unhealthy
metrics = unhealthy
}

if health.Terminating {
code = http.StatusGone
status = terminating
}

slogg.AddCustomAttributes(c,
slog.Group("healthz",
slog.Int("code", code),
slog.String("status", status),
slog.String("web", web),
slog.String("metrics", metrics),
),
)

c.JSON(code, gin.H{
"status": status,
"web": web,
"metrics": metrics,
})
}
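A hedged sketch of how the reworked handler could be exercised with `net/http/httptest`; the names mirror the diff above, but the test file itself is illustrative and not part of the commit, and the module path is an assumption based on the chart's image repository.

```go
package healthz_test

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/gin-gonic/gin"

	// Assumed module path, inferred from ghcr.io/n3tuk/dashboard.
	"github.com/n3tuk/dashboard/internal/serve/metrics/healthz"
)

func TestHealthzTransitions(t *testing.T) {
	gin.SetMode(gin.TestMode)
	r := gin.New()

	h := healthz.NewHealth()
	healthz.Attach(r, h)

	// Neither server has started, so the endpoint reports unhealthy.
	w := httptest.NewRecorder()
	r.ServeHTTP(w, httptest.NewRequest(http.MethodGet, "/healthz", nil))
	if w.Code != http.StatusServiceUnavailable {
		t.Fatalf("expected 503, got %d", w.Code)
	}

	// Both servers are up: healthy.
	h.Web, h.Metrics = true, true
	w = httptest.NewRecorder()
	r.ServeHTTP(w, httptest.NewRequest(http.MethodGet, "/healthz", nil))
	if w.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d", w.Code)
	}

	// Shutdown in progress: 410 Gone takes precedence.
	h.Terminating = true
	w = httptest.NewRecorder()
	r.ServeHTTP(w, httptest.NewRequest(http.MethodGet, "/healthz", nil))
	if w.Code != http.StatusGone {
		t.Fatalf("expected 410, got %d", w.Code)
	}
}
```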
