Merge branches 'development/2.10' and 'w/2.9/improvement/ZENKO-4848-remove-kubedb' into tmp/octopus/w/2.10/improvement/ZENKO-4848-remove-kubedb
bert-e committed Sep 23, 2024
3 parents 21ba928 + 8086340 + 18ab7d4 commit 6b16465
Showing 31 changed files with 8,886 additions and 3,700 deletions.
2 changes: 1 addition & 1 deletion .github/actions/debug-wait/action.yaml
@@ -6,7 +6,7 @@ runs:
using: composite
steps:
- name: "Debug: SSH to runner"
-uses: scality/actions/action-ssh-to-runner@1.6.0
+uses: scality/actions/action-ssh-to-runner@1.8.0
continue-on-error: true
with:
tmate-server-host: ${{ env.TMATE_SERVER_HOST }}
10 changes: 8 additions & 2 deletions .github/scripts/end2end/configs/keycloak_config.json
@@ -1,8 +1,6 @@
{
"realm" : "${OIDC_REALM}",
"enabled" : true,
-"groups" : [
-],
"defaultRoles" : [ "uma_authorization", "offline_access" ],
"roles": {
"realm": [
@@ -14,6 +12,14 @@
"containerId": "${OIDC_REALM}",
"attributes": {}
},
+{
+"id": "f3b3b3b4-1b3b-4b3b-8b3b-3b3b3b3b3b3b",
+"name": "AccountTest::DataAccessor",
+"composite": false,
+"clientRole": false,
+"containerId": "${OIDC_REALM}",
+"attributes": {}
+},
{
"id": "3500844c-c325-422d-a010-b674617c99f3",
"name": "AccountTest::DataConsumer",
2 changes: 1 addition & 1 deletion .github/scripts/end2end/configs/zenko.yaml
@@ -52,7 +52,7 @@ spec:
size: 20Gi
storageClassName: "standard"
kafkaCleaner:
-interval: 30s
+interval: 1m
minPartitionSizeBytes: 0Mi
minBatchSize: 0
keepAtLeast: 0
4 changes: 3 additions & 1 deletion .github/scripts/end2end/run-e2e-ctst.sh
@@ -22,6 +22,7 @@ ADMIN_PRA_SECRET_ACCESS_KEY=$(kubectl get secret end2end-pra-management-vault-ad
STORAGE_MANAGER_USER_NAME="ctst_storage_manager"
STORAGE_ACCOUNT_OWNER_USER_NAME="ctst_storage_account_owner"
DATA_CONSUMER_USER_NAME="ctst_data_consumer"
+DATA_ACCESSOR_USER_NAME="ctst_data_accessor"
VAULT_AUTH_HOST="${ZENKO_NAME}-connector-vault-auth-api.default.svc.cluster.local"
ZENKO_PORT="80"
KEYCLOAK_TEST_USER=${OIDC_USERNAME}
@@ -98,6 +99,7 @@ WORLD_PARAMETERS="$(jq -c <<EOF
"StorageManagerUsername":"${STORAGE_MANAGER_USER_NAME}",
"StorageAccountOwnerUsername":"${STORAGE_ACCOUNT_OWNER_USER_NAME}",
"DataConsumerUsername":"${DATA_CONSUMER_USER_NAME}",
+"DataAccessorUsername":"${DATA_ACCESSOR_USER_NAME}",
"ServiceUsersCredentials":${SERVICE_USERS_CREDENTIALS},
"AzureAccountName":"${AZURE_ACCOUNT_NAME}",
"AzureAccountKey":"${AZURE_SECRET_KEY}",
@@ -138,7 +140,7 @@ docker run \
--rm \
--network=host \
"${E2E_IMAGE}" /bin/bash \
--c "SUBDOMAIN=${SUBDOMAIN} CONTROL_PLANE_INGRESS_ENDPOINT=${OIDC_ENDPOINT} ACCOUNT=${ZENKO_ACCOUNT_NAME} KEYCLOAK_REALM=${KEYCLOAK_TEST_REALM_NAME} STORAGE_MANAGER=${STORAGE_MANAGER_USER_NAME} STORAGE_ACCOUNT_OWNER=${STORAGE_ACCOUNT_OWNER_USER_NAME} DATA_CONSUMER=${DATA_CONSUMER_USER_NAME} /ctst/bin/seedKeycloak.sh"; [[ $? -eq 1 ]] && exit 1 || echo 'Keycloak Configured!'
+-c "SUBDOMAIN=${SUBDOMAIN} CONTROL_PLANE_INGRESS_ENDPOINT=${OIDC_ENDPOINT} ACCOUNT=${ZENKO_ACCOUNT_NAME} KEYCLOAK_REALM=${KEYCLOAK_TEST_REALM_NAME} STORAGE_MANAGER=${STORAGE_MANAGER_USER_NAME} STORAGE_ACCOUNT_OWNER=${STORAGE_ACCOUNT_OWNER_USER_NAME} DATA_CONSUMER=${DATA_CONSUMER_USER_NAME} DATA_ACCESSOR=${DATA_ACCESSOR_USER_NAME} /ctst/bin/seedKeycloak.sh"; [[ $? -eq 1 ]] && exit 1 || echo 'Keycloak Configured!'

# Grant access to Kube API (insecure, only for testing)
kubectl create clusterrolebinding serviceaccounts-cluster-admin \
2 changes: 1 addition & 1 deletion .github/scripts/mocks/azure-mock.yaml
@@ -25,7 +25,7 @@ spec:
hostname: devstoreaccount1
subdomain: azure-mock
containers:
-- image: mcr.microsoft.com/azure-storage/azurite:3.23.0
+- image: mcr.microsoft.com/azure-storage/azurite:3.31.0
command: ["azurite", "-l", "/data", "--blobHost", "0.0.0.0", "--blobPort", "80", "--queueHost", "0.0.0.0","--queuePort", "81"]
name: azurite
ports:
2 changes: 1 addition & 1 deletion .github/scripts/mocks/azure/Dockerfile
@@ -1,4 +1,4 @@
-FROM mcr.microsoft.com/azure-storage/azurite:3.21.0
+FROM mcr.microsoft.com/azure-storage/azurite:3.31.0

RUN apk add -U --no-cache python3 py3-pip ca-certificates \
&& apk add -U --no-cache --virtual .build-deps \
28 changes: 14 additions & 14 deletions .github/workflows/end2end.yaml
@@ -89,15 +89,15 @@ env:
SUBDOMAIN: "zenko.local"
DR_SUBDOMAIN: "dr.zenko.local"
SKOPEO_PATH: "/tmp"
-HELM_VERSION: "v3.5.3"
-YQ_VERSION: "v4.27.5"
+HELM_VERSION: "v3.15.4"
+YQ_VERSION: "v4.44.3"
YQ_BINARY: "yq_linux_amd64"
-KUSTOMIZE_VERSION: "v4.4.1"
-GO_VERSION: "1.16.2"
-SKOPEO_VERSION: "v1.5.2"
-KUBECTL_VERSION: "1.30.0"
-TILT_VERSION: "0.23.4"
-KIND_VERSION: "v0.20.0"
+KUSTOMIZE_VERSION: "v5.4.3"
+GO_VERSION: "1.23.0"
+SKOPEO_VERSION: "v1.16.1"
+KUBECTL_VERSION: "1.31.0"
+TILT_VERSION: "0.33.19"
+KIND_VERSION: "v0.12.0"
ZENKO_ENABLE_SOSAPI: false
EXPIRE_ONE_DAY_EARLIER: true
TRANSITION_ONE_DAY_EARLIER: true
@@ -113,7 +113,7 @@ env:

jobs:
check-dashboard-versions:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -127,7 +127,7 @@ jobs:
run: bash ./.github/scripts/check_versions.sh

check-workflows:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -188,7 +188,7 @@ jobs:
source: ${{ env.ROOT_DOCUMENTATION_BUILDDIR }}

build-iso:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-22.04
needs: [build-kafka, check-dashboard-versions]
steps:
- name: Install dependencies
@@ -249,7 +249,7 @@ jobs:
source: "solution-base/_build"

build-kafka:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -300,7 +300,7 @@ jobs:
cache-to: type=gha,mode=max,scope=kafka-connect-${{ env.KAFKA_CONNECT_TAG }}

build-test-image:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -331,7 +331,7 @@ jobs:
cache-to: type=gha,mode=max,scope=end2end-test

lint-and-build-ctst:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
2 changes: 1 addition & 1 deletion VERSION
@@ -1,5 +1,5 @@
VERSION="2.10.0"

-VERSION_SUFFIX=-preview.7
+VERSION_SUFFIX=-preview.8

VERSION_FULL="${VERSION}${VERSION_SUFFIX}"
31 changes: 23 additions & 8 deletions docs/docsource/installation/install/install_xdm.rst
@@ -222,6 +222,21 @@ Deploy |product|
#. Refer to *MetalK8s Operation* to create volumes for |product|.

+#. Create a secret for MongoDB credentials:
+
+.. parsed-literal::
+cat <<EOF | kubectl apply -n zenko -f -
+apiVersion: v1
+kind: Secret
+metadata:
+name: mongodb-db-creds
+type: Opaque
+stringData:
+mongodb-username: admin
+mongodb-password: password
+EOF
#. Create a resource for |product|:

.. parsed-literal::
@@ -235,15 +250,15 @@ Deploy |product|
version: |version|
replicas: 1
mongodb:
-persistence:
-volumeClaimTemplate:
-size: 300Gi
-storageClassName: sc-300-g
+databaseName: eb1e786d-da1e-3fc5-83d2-46f083ab9764
+endpoints:
+- data-db-mongodb-sharded-mongos-0.data-db-mongodb-sharded.zenko.svc:27017
+passwordKey: mongodb-password
+provider: External
+userSecretName: mongodb-db-creds
+usernameKey: mongodb-username
redis:
-persistence:
-volumeClaimTemplate:
-size: 10Gi
-storageClassName: sc-10-g
+provider: Zenko
kafka:
provider: Managed
persistence:
4 changes: 2 additions & 2 deletions solution/deps.yaml
@@ -16,7 +16,7 @@ cloudserver:
sourceRegistry: ghcr.io/scality
dashboard: cloudserver/cloudserver-dashboards
image: cloudserver
-tag: 8.8.31
+tag: 8.8.32
envsubst: CLOUDSERVER_TAG
drctl:
sourceRegistry: ghcr.io/scality
@@ -80,7 +80,7 @@ mongodb-connector:
pensieve-api:
sourceRegistry: ghcr.io/scality
image: pensieve-api
-tag: 1.6.0
+tag: 1.6.1
envsubst: PENSIEVE_API_TAG
rclone:
sourceRegistry: rclone
68 changes: 0 additions & 68 deletions tests/ctst/.eslintrc.js

This file was deleted.

4 changes: 2 additions & 2 deletions tests/ctst/common/utils.ts
@@ -87,12 +87,12 @@ export const s3FunctionExtraParams: { [key: string]: Record<string, unknown>[] }
}],
};

-export function safeJsonParse<T>(jsonString: string): { ok: boolean, result: T | null } {
+export function safeJsonParse<T>(jsonString: string): { ok: boolean, result: T | null, error?: Error | null } {
let result: T;
try {
result = JSON.parse(jsonString) as T;
} catch (err) {
-return { ok: false, result: null };
+return { ok: false, result: null, error: (err as Error) };
}
return { ok: true, result };
}
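
For reference, a minimal sketch of how a caller could take advantage of the widened safeJsonParse return type to surface the parse error. It is not part of this commit: the function body mirrors the diff above, while the KafkaCleanerConfig type and the sample input are purely illustrative.

    export function safeJsonParse<T>(jsonString: string): { ok: boolean, result: T | null, error?: Error | null } {
        let result: T;
        try {
            result = JSON.parse(jsonString) as T;
        } catch (err) {
            return { ok: false, result: null, error: (err as Error) };
        }
        return { ok: true, result };
    }

    // Hypothetical caller: report why parsing failed instead of silently returning null.
    type KafkaCleanerConfig = { interval: string };

    const parsed = safeJsonParse<KafkaCleanerConfig>('{"interval": "1m"'); // deliberately malformed JSON
    if (!parsed.ok) {
        console.error(`config parse failed: ${parsed.error?.message}`);
    } else {
        console.log(`kafkaCleaner interval: ${parsed.result!.interval}`);
    }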
27 changes: 27 additions & 0 deletions tests/ctst/eslint.config.mjs
@@ -0,0 +1,27 @@
import path from 'node:path';
import { fileURLToPath } from 'node:url';
import js from '@eslint/js';
import { FlatCompat } from '@eslint/eslintrc';
import tseslint from 'typescript-eslint';
import { includeIgnoreFile } from '@eslint/compat';

const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const gitignorePath = path.resolve(__dirname, '.gitignore');
const compat = new FlatCompat({
baseDirectory: __dirname,
recommendedConfig: js.configs.recommended,
allConfig: js.configs.all,
});

export default tseslint.config(
...compat.extends('scality'),
...tseslint.configs.recommended,
includeIgnoreFile(gitignorePath),
{
rules: {
// CucumberJS steps start with an uppercase
'new-cap': 'off',
},
},
);
17 changes: 16 additions & 1 deletion tests/ctst/features/pra.feature
@@ -22,6 +22,7 @@ Feature: PRA operations

# Check that objects are transitioned in the DR site
Given access keys for the replicated account
+
Then object "obj-1" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site
And object "obj-2" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site

@@ -35,11 +36,25 @@
Then object "obj-1" should "" be "restored" and have the storage class "e2e-cold" on "Primary" site
And object "obj-1" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site
+
+# Test the readonly
+When the "vault-check-seeds" cronjobs completes without error on "Primary" site
+And the DATA_ACCESSOR user tries to perform PutObject on "DR" site
+Then it "should not" pass Vault authentication
+
+# Switch to failover
+When I request the failover state for the DR
+Then the DR sink should be in phase "Failover"

# Restore on DR site
-When i restore object "obj2-1" for 2 days on "DR" site
+When i restore object "obj2-1" for 200000 days on "DR" site
Then object "obj2-1" should "" be "restored" and have the storage class "e2e-cold" on "DR" site
And object "obj2-1" should "" be "transitioned" and have the storage class "e2e-cold" on "Primary" site
+
+# Switch to failback
+When I resume operations for the DR
+Then the DR sink should be in phase "Running"
+And object "obj2-1" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site

# Pause / Resume DR
When I pause the DR
Then the DR source should be in phase "Paused"
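
The read-only check added above expects a PutObject from the data-accessor user to be rejected while the DR site has not yet been failed over. As a rough illustration only, a helper backing the "the DATA_ACCESSOR user tries to perform PutObject" step could look like the sketch below; the function name, bucket, endpoint, and object key are hypothetical and do not reflect the actual ctst step definitions.

    import { S3Client, PutObjectCommand } from '@aws-sdk/client-s3';

    // Hypothetical sketch: attempt a PutObject with the data-accessor credentials and
    // report whether the request was accepted, so the scenario can assert the rejection.
    async function tryPutObjectAsDataAccessor(
        endpoint: string,
        bucket: string,
        credentials: { accessKeyId: string; secretAccessKey: string },
    ): Promise<{ accepted: boolean; errorName?: string }> {
        const s3 = new S3Client({ endpoint, region: 'us-east-1', credentials, forcePathStyle: true });
        try {
            await s3.send(new PutObjectCommand({ Bucket: bucket, Key: 'readonly-probe', Body: 'x' }));
            return { accepted: true };
        } catch (err) {
            // On a read-only DR site the expected outcome is an authentication or authorization error.
            return { accepted: false, errorName: (err as Error).name };
        }
    }

The "should not pass Vault authentication" assertion in the scenario then amounts to checking that the call above was rejected.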
2 changes: 1 addition & 1 deletion tests/ctst/features/quotas/CountItems.feature
@@ -8,5 +8,5 @@ Feature: CountItems measures the utilization metrics
Scenario Outline: Countitems runs without error and compute utilization metrics
Given an existing bucket "" "without" versioning, "without" ObjectLock "without" retention mode
And an object "" that "exists"
-When the "CountItems" cronjobs completes without error
+When the "count-items" cronjobs completes without error
Then the operation finished without error