
Merge pull request #22 from vshn/k8sapi
extract information from k8s API
mweibel authored Nov 18, 2022
2 parents ef96451 + 3af29b6 commit 1fc7869
Showing 8 changed files with 742 additions and 86 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -19,6 +19,7 @@ node_modules/

# Configuration
env
.env

# Work
.work/
22 changes: 22 additions & 0 deletions README.md
@@ -11,3 +11,25 @@
Batch job to sync usage data from the Cloudscale.ch metrics API to the [APPUiO Cloud reporting](https://github.com/appuio/appuio-cloud-reporting/) database.

See the [component documentation](https://hub.syn.tools/cloudscale-metrics-collector/index.html) for more information.

## Getting started

You'll need a working setup of [provider-cloudscale](https://github.com/vshn/provider-cloudscale/) and
[appuio-cloud-reporting](https://github.com/appuio/appuio-cloud-reporting) to test this collector. Follow their respective READMEs to set them up.

Then, set the following environment variables:
```
# number of days in the past to fetch metrics for
DAYS=2
ACR_DB_URL=postgres://reporting:reporting@localhost/appuio-cloud-reporting-test
CLOUDSCALE_API_TOKEN=<API TOKEN>
# either set the server URL and token
KUBERNETES_SERVER_URL=<SERVER URL>
KUBERNETES_SERVER_TOKEN=<TOKEN>
# or set a KUBECONFIG - this also circumvents potential x509 certificate errors when connecting to a local cluster
KUBECONFIG=/path/to/provider-cloudscale/.kind/kind-kubeconfig-v1.24.0
```
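
With these variables set, the collector can reach both the cloudscale.ch metrics API and the Kubernetes API. The following is a minimal sketch (not the PR's actual wiring) of how a Kubernetes client configuration could be derived from `KUBERNETES_SERVER_URL`/`KUBERNETES_SERVER_TOKEN` or `KUBECONFIG` using client-go; the `newRestConfig` helper is hypothetical:

```go
package main

import (
	"fmt"
	"os"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// newRestConfig prefers the explicit server URL/token pair and falls back to KUBECONFIG.
func newRestConfig() (*rest.Config, error) {
	if url := os.Getenv("KUBERNETES_SERVER_URL"); url != "" {
		return &rest.Config{
			Host:        url,
			BearerToken: os.Getenv("KUBERNETES_SERVER_TOKEN"),
		}, nil
	}
	// clientcmd reads the kubeconfig path given in KUBECONFIG above.
	return clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
}

func main() {
	cfg, err := newRestConfig()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("connecting to", cfg.Host)
}
```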

82 changes: 63 additions & 19 deletions accumulate.go
@@ -3,9 +3,18 @@ package main
import (
"context"
"fmt"
"github.com/cloudscale-ch/cloudscale-go-sdk/v2"
"os"
"time"

"github.com/cloudscale-ch/cloudscale-go-sdk/v2"
cloudscalev1 "github.com/vshn/provider-cloudscale/apis/cloudscale/v1"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)

const (
organizationLabel = "appuio.io/organization"
namespaceLabel = "crossplane.io/claim-namespace"
)

// AccumulateKey represents one data point ("fact") in the billing database.
@@ -31,66 +40,101 @@ namespace are stored.
This method is "accumulating" data because it collects data from possibly multiple ObjectsUsers under the same
AccumulateKey. This is because the billing system can't handle multiple ObjectsUsers per namespace.
*/
func accumulateBucketMetrics(ctx context.Context, date time.Time, cloudscaleClient *cloudscale.Client) (map[AccumulateKey]uint64, error) {
func accumulateBucketMetrics(ctx context.Context, date time.Time, cloudscaleClient *cloudscale.Client, k8sclient client.Client) (map[AccumulateKey]uint64, error) {
bucketMetricsRequest := cloudscale.BucketMetricsRequest{Start: date, End: date}
bucketMetrics, err := cloudscaleClient.Metrics.GetBucketMetrics(ctx, &bucketMetricsRequest)
if err != nil {
return nil, err
}

nsTenants, err := fetchNamespaces(ctx, k8sclient)
if err != nil {
return nil, err
}

buckets, err := fetchBuckets(ctx, k8sclient)
if err != nil {
return nil, err
}

accumulated := make(map[AccumulateKey]uint64)

for _, bucketMetricsData := range bucketMetrics.Data {
objectsUser, err := cloudscaleClient.ObjectsUsers.Get(ctx, bucketMetricsData.Subject.ObjectsUserID)
if err != nil || objectsUser == nil {
fmt.Fprintf(os.Stderr, "WARNING: Cannot sync bucket %s, objects user %s not found\n", bucketMetricsData.Subject.BucketName, bucketMetricsData.Subject.ObjectsUserID)
name := bucketMetricsData.Subject.BucketName
ns, ok := buckets[name]
if !ok {
fmt.Fprintf(os.Stderr, "WARNING: Cannot sync bucket, bucket resource %q not found\n", name)
continue
}
err = accumulateBucketMetricsForObjectsUser(accumulated, bucketMetricsData, objectsUser)
tenant, ok := nsTenants[ns]
if !ok {
fmt.Fprintf(os.Stderr, "WARNING: Cannot sync bucket, namespace %q not found in map\n", ns)
continue
}

err = accumulateBucketMetricsForObjectsUser(accumulated, bucketMetricsData, tenant, ns)
if err != nil {
fmt.Fprintf(os.Stderr, "WARNING: Cannot sync bucket %s: %v\n", bucketMetricsData.Subject.BucketName, err)
fmt.Fprintf(os.Stderr, "WARNING: Cannot sync bucket %s: %v\n", name, err)
continue
}
}

return accumulated, nil
}

func accumulateBucketMetricsForObjectsUser(accumulated map[AccumulateKey]uint64, bucketMetricsData cloudscale.BucketMetricsData, objectsUser *cloudscale.ObjectsUser) error {
if len(bucketMetricsData.TimeSeries) != 1 {
return fmt.Errorf("There must be exactly one metrics data point, found %d", len(bucketMetricsData.TimeSeries))
func fetchBuckets(ctx context.Context, k8sclient client.Client) (map[string]string, error) {
buckets := &cloudscalev1.BucketList{}
if err := k8sclient.List(ctx, buckets, client.HasLabels{namespaceLabel}); err != nil {
return nil, fmt.Errorf("bucket list: %w", err)
}

tenantStr := objectsUser.Tags["tenant"]
if tenantStr == "" {
return fmt.Errorf("no tenant information found on objectsUser")
bucketNS := map[string]string{}
for _, b := range buckets.Items {
bucketNS[b.GetBucketName()] = b.Labels[namespaceLabel]
}
namespace := objectsUser.Tags["namespace"]
if namespace == "" {
return fmt.Errorf("no namespace information found on objectsUser")
return bucketNS, nil
}

func fetchNamespaces(ctx context.Context, k8sclient client.Client) (map[string]string, error) {
namespaces := &corev1.NamespaceList{}
if err := k8sclient.List(ctx, namespaces, client.HasLabels{organizationLabel}); err != nil {
return nil, fmt.Errorf("namespace list: %w", err)
}

nsTenants := map[string]string{}
for _, ns := range namespaces.Items {
nsTenants[ns.Name] = ns.Labels[organizationLabel]
}
return nsTenants, nil
}

func accumulateBucketMetricsForObjectsUser(accumulated map[AccumulateKey]uint64, bucketMetricsData cloudscale.BucketMetricsData, tenant, namespace string) error {
if len(bucketMetricsData.TimeSeries) != 1 {
return fmt.Errorf("there must be exactly one metrics data point, found %d", len(bucketMetricsData.TimeSeries))
}

// For now all the buckets have the same zone. This may change in the future if Cloudscale decides to have different
// prices for different locations.
zone := sourceZones[0]

sourceStorage := AccumulateKey{
Query: sourceQueryStorage,
Zone: zone,
Tenant: tenantStr,
Tenant: tenant,
Namespace: namespace,
Start: bucketMetricsData.TimeSeries[0].Start,
}
sourceTrafficOut := AccumulateKey{
Query: sourceQueryTrafficOut,
Zone: zone,
Tenant: tenantStr,
Tenant: tenant,
Namespace: namespace,
Start: bucketMetricsData.TimeSeries[0].Start,
}
sourceRequests := AccumulateKey{
Query: sourceQueryRequests,
Zone: zone,
Tenant: tenantStr,
Tenant: tenant,
Namespace: namespace,
Start: bucketMetricsData.TimeSeries[0].Start,
}
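For orientation, the new flow above resolves billing ownership through two Kubernetes-derived lookups instead of per-ObjectsUser tags: `fetchBuckets` maps bucket name to namespace via the `crossplane.io/claim-namespace` label, and `fetchNamespaces` maps namespace to tenant via the `appuio.io/organization` label. A self-contained sketch of that join, with purely illustrative bucket and namespace names:

```go
package main

import "fmt"

func main() {
	// bucket name -> namespace, as fetchBuckets derives it from the
	// crossplane.io/claim-namespace label on Bucket resources.
	buckets := map[string]string{"example-bucket": "example-namespace"}

	// namespace -> tenant, as fetchNamespaces derives it from the
	// appuio.io/organization label on Namespace resources.
	nsTenants := map[string]string{"example-namespace": "example-org"}

	name := "example-bucket"
	ns, ok := buckets[name]
	if !ok {
		fmt.Printf("WARNING: Cannot sync bucket, bucket resource %q not found\n", name)
		return
	}
	tenant, ok := nsTenants[ns]
	if !ok {
		fmt.Printf("WARNING: Cannot sync bucket, namespace %q not found in map\n", ns)
		return
	}
	fmt.Printf("bucket %s is billed to tenant %s (namespace %s)\n", name, tenant, ns)
}
```

A bucket whose namespace carries no organization label is skipped with a warning, mirroring the behaviour of `accumulateBucketMetrics` above.
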
8 changes: 3 additions & 5 deletions accumulate_test.go
@@ -23,7 +23,7 @@ func assertEqualfUint64(t *testing.T, expected uint64, actual uint64, msg string

func TestAccumulateBucketMetricsForObjectsUser(t *testing.T) {
zone := "cloudscale"
tenant := "inity"
organization := "inity"
namespace := "testnamespace"

// get the correct date for a data set that was created yesterday
@@ -47,17 +47,15 @@ func TestAccumulateBucketMetricsForObjectsUser(t *testing.T) {
bucketMetricsData := cloudscale.BucketMetricsData{
TimeSeries: bucketMetricsInterval,
}
objectsUser := cloudscale.ObjectsUser{}
objectsUser.Tags = cloudscale.TagMap{"zone": zone, "tenant": tenant, "namespace": namespace}

accumulated := make(map[AccumulateKey]uint64)
assert.NoError(t, accumulateBucketMetricsForObjectsUser(accumulated, bucketMetricsData, &objectsUser))
assert.NoError(t, accumulateBucketMetricsForObjectsUser(accumulated, bucketMetricsData, organization, namespace))

require.Len(t, accumulated, 3, "incorrect amount of values 'accumulated'")

key := AccumulateKey{
Zone: zone,
Tenant: tenant,
Tenant: organization,
Namespace: namespace,
Start: date,
}
55 changes: 44 additions & 11 deletions go.mod
@@ -5,18 +5,32 @@ go 1.18
require (
github.com/appuio/appuio-cloud-reporting v0.5.0
github.com/cloudscale-ch/cloudscale-go-sdk/v2 v2.0.1
github.com/go-logr/logr v1.2.3
github.com/go-logr/zapr v1.2.3
github.com/jmoiron/sqlx v1.3.5
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.10.3
go.uber.org/zap v1.21.0
github.com/vshn/provider-cloudscale v0.5.0
k8s.io/api v0.25.4
k8s.io/apimachinery v0.25.4
k8s.io/client-go v0.25.4
sigs.k8s.io/controller-runtime v0.13.1
)

require (
github.com/benbjohnson/clock v1.1.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/crossplane/crossplane-runtime v0.18.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
github.com/jackc/pgconn v1.12.0 // indirect
github.com/jackc/pgio v1.0.0 // indirect
@@ -25,14 +39,33 @@ require (
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
github.com/jackc/pgtype v1.11.0 // indirect
github.com/jackc/pgx/v4 v4.16.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/lopezator/migrator v0.3.0 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 // indirect
github.com/spf13/afero v1.8.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect
golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 // indirect
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.70.1 // indirect
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
