diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..1f46f79 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,61 @@ +# +# Copyright 2021 OpsMx, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# Install the latest versions of our mods. This is done as a separate step +# so it will pull from an image cache if possible, unless there are changes. +# + +FROM --platform=linux/amd64 golang:alpine AS buildmod +RUN mkdir /build +WORKDIR /build +COPY go.mod . +COPY go.sum . +RUN go mod download + +# +# Compile the code. +# +FROM buildmod AS build-binaries +COPY . . +ARG GIT_BRANCH +ARG GIT_HASH +ARG BUILD_TYPE +ARG TARGETOS +ARG TARGETARCH +ENV GIT_BRANCH=${GIT_BRANCH} GIT_HASH=${GIT_HASH} BUILD_TYPE=${BUILD_TYPE} CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} +RUN mkdir /out +RUN go build -o /out/upgrade-script -ldflags="-X 'github.com/OpsMx/go-app-base/version.buildType=${BUILD_TYPE}' -X 'github.com/OpsMx/go-app-base/version.gitHash=${GIT_HASH}' -X 'github.com/OpsMx/go-app-base/version.gitBranch=${GIT_BRANCH}'" . + +# +# Establish a base OS image used by all the applications. +# +FROM alpine:3 AS base-image +RUN apk update \ + && apk upgrade \ + && apk add ca-certificates curl jq bash git \ + && rm -rf /var/cache/apk/* +WORKDIR /app +COPY docker/run.sh /app/run.sh +ENTRYPOINT ["/bin/sh", "/app/run.sh"] + +# +# Build the Upgrade-Script image. This should be a --target on docker build. 
+# +FROM base-image AS upgrade-script-image +COPY --from=build-binaries /out/upgrade-script /app +CMD ["/app/upgrade-script"] + diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..f44f9bf --- /dev/null +++ b/Makefile @@ -0,0 +1,105 @@ +# +# Copyright 2021-2022 OpsMx, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +TARGETS=test local +PLATFORM=linux/amd64,linux/arm64 +BUILDX=docker buildx build --pull --platform ${PLATFORM} +IMAGE_PREFIX=docker.flame.org/library/ + +# +# Build targets. Adding to these will cause magic to occur. +# + +# These are the targets for Docker images. +# Dockerfiles should have a target that ends in -image +IMAGE_TARGETS = upgrade-script + +# +# Below here lies magic... +# + +all_deps := $(shell find * -name '*.go' | grep -v _test) + +now := $(shell date -u +%Y%m%dT%H%M%S) + +# +# Default target. +# + +.PHONY: all +all: ${TARGETS} + +# +# make a buildtime directory to hold the build timestamp files +# +buildtime: + [ ! 
-d buildtime ] && mkdir buildtime + +# +# set git info details +# +set-git-info: + @$(eval GIT_BRANCH=$(shell git describe --tags)) + @$(eval GIT_HASH=$(shell git rev-parse ${GIT_BRANCH})) + + +# +# Multi-architecture image builds +# +.PHONY: images +images: buildtime clean-image-names set-git-info $(addsuffix .tstamp, $(addprefix buildtime/,$(IMAGE_TARGETS))) + +buildtime/%.tstamp:: ${all_deps} Dockerfile + ${BUILDX} \ + --tag ${IMAGE_PREFIX}$(patsubst %.tstamp,%,$(@F)):latest \ + --tag ${IMAGE_PREFIX}$(patsubst %.tstamp,%,$(@F)):${GIT_BRANCH} \ + --target $(patsubst %.tstamp,%,$(@F))-image \ + --build-arg GIT_HASH=${GIT_HASH} \ + --build-arg GIT_BRANCH=${GIT_BRANCH} \ + --build-arg BUILD_TYPE=release \ + -f Dockerfile \ + --push . + echo >> buildtime/image-names.txt ${IMAGE_PREFIX}$(patsubst %.tstamp,%,$(@F)):latest + echo >> buildtime/image-names.txt ${IMAGE_PREFIX}$(patsubst %.tstamp,%,$(@F)):${GIT_BRANCH} + @touch $@ + +.PHONY: image-names +image-names: + [ -n "${GITHUB_OUTPUT}" ] && echo imageNames=$(shell echo `cat buildtime/image-names.txt` | sed 's/\ /,\ /g') >> ${GITHUB_OUTPUT} + +# +# Test targets +# + +.PHONY: test +test: + go test -race ./... + +# +# Clean the world. 
+# + +.PHONY: clean +clean: clean-image-names + rm -f buildtime/*.tstamp + rm -f bin/* + +.PHONY: really-clean +really-clean: clean + +.PHONY: clean-image-names +clean-image-names: + rm -f buildtime/image-names.txt diff --git a/april2024june2024/appLevelTools.go b/april2024june2024/appLevelTools.go new file mode 100644 index 0000000..9db44ce --- /dev/null +++ b/april2024june2024/appLevelTools.go @@ -0,0 +1,50 @@ +package april2024june2024 + +import ( + "context" + "fmt" + "upgradationScript/april2024june2024/june2024" + + "upgradationScript/logger" + + "github.com/Khan/genqlient/graphql" +) + +func populateAppLevelTools(prodDgraphClient graphql.Client) error { + ctx := context.Background() + + logger.Logger.Debug("--------------Populating App Env Tools Data transition-----------------") + + appEnvs, err := june2024.AppEnvTools(ctx, prodDgraphClient) + if err != nil { + return fmt.Errorf("populateAppLevelTools: could'nt query RunhistoriesData error: %s", err.Error()) + } + + for _, appEnv := range appEnvs.QueryApplicationEnvironment { + logger.Logger.Debug("---------------------------------------------") + logger.Sl.Debugf("App Env Tools to be populated for id %v", appEnv.Id) + + tools := []string{} + + for _, deployment := range appEnv.Deployments { + logger.Sl.Debugf("Gathering Tools used in policy checks for deployment id %v", deployment.Id) + for _, runHistory := range deployment.PolicyRunHistory { + logger.Sl.Debugf("Tool used in policy run history id: %v is %v", runHistory.Id, runHistory.DatasourceTool) + tools = AppendIfNotPresent(tools, runHistory.DatasourceTool) + } + } + + logger.Sl.Debugf("App Env Tools to be populated with tools %v for id %v", tools, appEnv.Id) + + if _, err := june2024.UpdateApplicationEnvironmentWithTools(ctx, prodDgraphClient, appEnv.Id, tools); err != nil { + return fmt.Errorf("populateAppLevelTools: UpdateApplicationEnvironmentWithTools error: %s", err.Error()) + } + + logger.Sl.Debugf("added tools for AppEnv Id %v successfully", 
appEnv.Id) + logger.Logger.Debug("---------------------------------------------") + } + + logger.Logger.Debug("--------------Completed App Env Tools Data transition-----------------") + + return nil +} diff --git a/april2024june2024/april2024/genqlient.yaml b/april2024june2024/april2024/genqlient.yaml new file mode 100644 index 0000000..7b7db8e --- /dev/null +++ b/april2024june2024/april2024/genqlient.yaml @@ -0,0 +1,17 @@ +schema: schema.graphql +operations: +- queries.graphql +generated: schema-generated.go +package: april2024 +use_struct_references: true +bindings: + Boolean: + type: "*bool" + DateTime: + type: "*time.Time" + Int64: + type: int64 + Int: + type: "*int" + ID: + type: "*string" diff --git a/april2024june2024/april2024/queries.graphql b/april2024june2024/april2024/queries.graphql new file mode 100644 index 0000000..9eaa57e --- /dev/null +++ b/april2024june2024/april2024/queries.graphql @@ -0,0 +1,16 @@ +query QueryRunHistory { + queryRunHistory(order: { asc: CreatedAt }, filter: { Pass: false }) { + id + AlertTitle + AlertMessage + Suggestions + Error + Severity + CreatedAt + UpdatedAt + Action + JiraUrl + Status + Reason + } +} \ No newline at end of file diff --git a/april2024june2024/april2024/schema-generated.go b/april2024june2024/april2024/schema-generated.go new file mode 100644 index 0000000..7f7d8a7 --- /dev/null +++ b/april2024june2024/april2024/schema-generated.go @@ -0,0 +1,126 @@ +// Code generated by github.com/Khan/genqlient, DO NOT EDIT. + +package april2024 + +import ( + "context" + "time" + + "github.com/Khan/genqlient/graphql" +) + +// QueryRunHistoryQueryRunHistory includes the requested fields of the GraphQL type RunHistory. 
+type QueryRunHistoryQueryRunHistory struct { + Id *string `json:"id"` + AlertTitle string `json:"AlertTitle"` + AlertMessage string `json:"AlertMessage"` + Suggestions string `json:"Suggestions"` + Error string `json:"Error"` + Severity Severity `json:"Severity"` + CreatedAt *time.Time `json:"CreatedAt"` + UpdatedAt *time.Time `json:"UpdatedAt"` + Action string `json:"Action"` + JiraUrl string `json:"JiraUrl"` + Status string `json:"Status"` + Reason string `json:"Reason"` +} + +// GetId returns QueryRunHistoryQueryRunHistory.Id, and is useful for accessing the field via an interface. +func (v *QueryRunHistoryQueryRunHistory) GetId() *string { return v.Id } + +// GetAlertTitle returns QueryRunHistoryQueryRunHistory.AlertTitle, and is useful for accessing the field via an interface. +func (v *QueryRunHistoryQueryRunHistory) GetAlertTitle() string { return v.AlertTitle } + +// GetAlertMessage returns QueryRunHistoryQueryRunHistory.AlertMessage, and is useful for accessing the field via an interface. +func (v *QueryRunHistoryQueryRunHistory) GetAlertMessage() string { return v.AlertMessage } + +// GetSuggestions returns QueryRunHistoryQueryRunHistory.Suggestions, and is useful for accessing the field via an interface. +func (v *QueryRunHistoryQueryRunHistory) GetSuggestions() string { return v.Suggestions } + +// GetError returns QueryRunHistoryQueryRunHistory.Error, and is useful for accessing the field via an interface. +func (v *QueryRunHistoryQueryRunHistory) GetError() string { return v.Error } + +// GetSeverity returns QueryRunHistoryQueryRunHistory.Severity, and is useful for accessing the field via an interface. +func (v *QueryRunHistoryQueryRunHistory) GetSeverity() Severity { return v.Severity } + +// GetCreatedAt returns QueryRunHistoryQueryRunHistory.CreatedAt, and is useful for accessing the field via an interface. 
+func (v *QueryRunHistoryQueryRunHistory) GetCreatedAt() *time.Time { return v.CreatedAt } + +// GetUpdatedAt returns QueryRunHistoryQueryRunHistory.UpdatedAt, and is useful for accessing the field via an interface. +func (v *QueryRunHistoryQueryRunHistory) GetUpdatedAt() *time.Time { return v.UpdatedAt } + +// GetAction returns QueryRunHistoryQueryRunHistory.Action, and is useful for accessing the field via an interface. +func (v *QueryRunHistoryQueryRunHistory) GetAction() string { return v.Action } + +// GetJiraUrl returns QueryRunHistoryQueryRunHistory.JiraUrl, and is useful for accessing the field via an interface. +func (v *QueryRunHistoryQueryRunHistory) GetJiraUrl() string { return v.JiraUrl } + +// GetStatus returns QueryRunHistoryQueryRunHistory.Status, and is useful for accessing the field via an interface. +func (v *QueryRunHistoryQueryRunHistory) GetStatus() string { return v.Status } + +// GetReason returns QueryRunHistoryQueryRunHistory.Reason, and is useful for accessing the field via an interface. +func (v *QueryRunHistoryQueryRunHistory) GetReason() string { return v.Reason } + +// QueryRunHistoryResponse is returned by QueryRunHistory on success. +type QueryRunHistoryResponse struct { + QueryRunHistory []*QueryRunHistoryQueryRunHistory `json:"queryRunHistory"` +} + +// GetQueryRunHistory returns QueryRunHistoryResponse.QueryRunHistory, and is useful for accessing the field via an interface. +func (v *QueryRunHistoryResponse) GetQueryRunHistory() []*QueryRunHistoryQueryRunHistory { + return v.QueryRunHistory +} + +type Severity string + +const ( + SeverityCritical Severity = "critical" + SeverityHigh Severity = "high" + SeverityMedium Severity = "medium" + SeverityLow Severity = "low" + SeverityInfo Severity = "info" + SeverityNone Severity = "none" + SeverityUnknown Severity = "unknown" +) + +// The query or mutation executed by QueryRunHistory. 
+const QueryRunHistory_Operation = ` +query QueryRunHistory { + queryRunHistory(order: {asc:CreatedAt}, filter: {Pass:false}) { + id + AlertTitle + AlertMessage + Suggestions + Error + Severity + CreatedAt + UpdatedAt + Action + JiraUrl + Status + Reason + } +} +` + +func QueryRunHistory( + ctx_ context.Context, + client_ graphql.Client, +) (*QueryRunHistoryResponse, error) { + req_ := &graphql.Request{ + OpName: "QueryRunHistory", + Query: QueryRunHistory_Operation, + } + var err_ error + + var data_ QueryRunHistoryResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} diff --git a/april2024june2024/april2024/schema.graphql b/april2024june2024/april2024/schema.graphql new file mode 100644 index 0000000..0e58af5 --- /dev/null +++ b/april2024june2024/april2024/schema.graphql @@ -0,0 +1,4378 @@ +directive @search(by: [DgraphIndex!]) on FIELD_DEFINITION + +directive @cacheControl(maxAge: Int!) on QUERY + +directive @hasInverse(field: String!) 
on FIELD_DEFINITION + +directive @withSubscription on OBJECT | INTERFACE | FIELD_DEFINITION + +directive @secret(field: String!, pred: String) on OBJECT | INTERFACE + +directive @auth(password: AuthRule, query: AuthRule, add: AuthRule, update: AuthRule, delete: AuthRule) on OBJECT | INTERFACE + +directive @custom(http: CustomHTTP, dql: String) on FIELD_DEFINITION + +directive @remote on OBJECT | INTERFACE | UNION | INPUT_OBJECT | ENUM + +directive @dgraph(type: String, pred: String) on OBJECT | INTERFACE | FIELD_DEFINITION + +directive @id(interface: Boolean) on FIELD_DEFINITION + +directive @generate(query: GenerateQueryParams, mutation: GenerateMutationParams, subscription: Boolean) on OBJECT | INTERFACE + +directive @cascade(fields: [String]) on FIELD + +directive @lambda on FIELD_DEFINITION + +directive @lambdaOnMutate(add: Boolean, update: Boolean, delete: Boolean) on OBJECT | INTERFACE + +directive @remoteResponse(name: String) on FIELD_DEFINITION + +input AddApplicationDeploymentInput { + """id is randomly assigned""" + id: String! + artifact: [ArtifactRef!] + applicationEnvironment: ApplicationEnvironmentRef! + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage! + + """source is argo, spinnaker etc""" + source: String! + + """component would be a service""" + component: String! + + """user who deployed the artifact""" + deployedBy: String + toolsUsed: ToolsUsedRef! + deploymentRisk: ApplicationDeploymentRiskRef + policyRunHistory: [RunHistoryRef!] 
+} + +type AddApplicationDeploymentPayload { + applicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + numUids: Int +} + +input AddApplicationDeploymentRiskInput { + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment: ApplicationDeploymentRef! +} + +type AddApplicationDeploymentRiskPayload { + applicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + numUids: Int +} + +input AddApplicationEnvironmentInput { + """id is randomly assigned""" + id: String! + environment: EnvironmentRef + application: ApplicationRef! + deploymentTarget: DeploymentTargetRef! + namespace: String! + + """ + toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + """ + toolsUsed: String + deployments: [ApplicationDeploymentRef!] + riskStatus: ApplicationRiskStatusRef + metadata: [KeyValueRef!] +} + +type AddApplicationEnvironmentPayload { + applicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + numUids: Int +} + +input AddApplicationInput { + """id is randomly assigned""" + id: String! + name: String! + roles: [RoleRef!] + environments: [ApplicationEnvironmentRef!] + team: TeamRef! + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + metadata: [KeyValueRef!] +} + +type AddApplicationPayload { + application(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + numUids: Int +} + +input AddApplicationRiskStatusInput { + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime! 
+ updatedAt: DateTime! + applicationEnvironment: ApplicationEnvironmentRef! +} + +type AddApplicationRiskStatusPayload { + applicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + numUids: Int +} + +input AddArtifactInput { + id: String! + artifactType: String! + artifactName: String! + artifactTag: String! + artifactSha: String! + scanData: [ArtifactScanDataRef!] +} + +type AddArtifactPayload { + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + numUids: Int +} + +input AddArtifactScanDataInput { + id: String! + artifactSha: String! + tool: String! + artifactDetails: ArtifactRef + lastScannedAt: DateTime + components: [ComponentRef!] + vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int +} + +type AddArtifactScanDataPayload { + artifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + numUids: Int +} + +input AddBuildToolInput { + """id is randomly assigned""" + id: String! + + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String! + + """tool is jenkins etc""" + tool: String! + + """buildName is the name of the job/pipeline/action""" + buildName: String! + buildUrl: String! + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String! + + """artifactTag would be the tag of the artifact""" + artifactTag: String! + + """digest is the sha of the artifact""" + digest: String! 
+ + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + artifactNode: ArtifactRef + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + sourceCodeTool: SourceCodeToolRef + commitMetaData: [CommitMetaDataRef!] + createdAt: DateTime! +} + +type AddBuildToolPayload { + buildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + numUids: Int +} + +input AddCommitMetaDataInput { + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool: BuildToolRef! +} + +type AddCommitMetaDataPayload { + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + numUids: Int +} + +input AddComponentInput { + id: String! + type: String! + name: String! + version: String! + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities: [VulnerabilityRef!] + artifacts: [ArtifactScanDataRef!] +} + +type AddComponentPayload { + component(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + numUids: Int +} + +input AddCredentialsInput { + data: String! + integrator: IntegratorRef! +} + +type AddCredentialsPayload { + credentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + numUids: Int +} + +input AddCVSSInput { + type: String + score: String +} + +type AddCVSSPayload { + cVSS(filter: CVSSFilter, order: CVSSOrder, first: Int, offset: Int): [CVSS] + numUids: Int +} + +input AddCWEInput { + id: String! + name: String! 
+ description: String +} + +type AddCWEPayload { + cWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + numUids: Int +} + +input AddDeploymentTargetInput { + """id is randomly assigned""" + id: String! + name: String! + ip: String! + isFirewall: Boolean + organization: OrganizationRef! + defaultEnvironment: EnvironmentRef! +} + +type AddDeploymentTargetPayload { + deploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + numUids: Int +} + +input AddEnvironmentInput { + id: String! + organization: OrganizationRef! + purpose: String! +} + +type AddEnvironmentPayload { + environment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + numUids: Int +} + +input AddFeatureModeInput { + id: String! + organization: OrganizationRef! + scan: String! + type: String! + enabled: Boolean! + category: String! + createdAt: DateTime! + updatedAt: DateTime! +} + +type AddFeatureModePayload { + featureMode(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode] + numUids: Int +} + +input AddIntegratorInput { + id: String! + organization: OrganizationRef! + name: String! + type: String! + category: String! + credentials: CredentialsRef! + createdAt: DateTime! + updatedAt: DateTime! +} + +type AddIntegratorPayload { + integrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + numUids: Int +} + +input AddKeyValueInput { + id: String! + name: String! + value: String! +} + +type AddKeyValuePayload { + keyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + numUids: Int +} + +input AddOrganizationInput { + """id is randomly assigned""" + id: String! + name: String! + roles: [RoleRef!] + teams: [TeamRef!] + environments: [DeploymentTargetRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] 
+ integrators: [IntegratorRef!] + featureModes: [FeatureModeRef!] +} + +type AddOrganizationPayload { + organization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + numUids: Int +} + +input AddPolicyDefinitionInput { + id: String! + ownerOrg: OrganizationRef! + ownerTeam: TeamRef + ownerApplication: ApplicationRef + createdAt: DateTime! + updatedAt: DateTime! + policyName: String! + category: String! + stage: String! + description: String! + scheduledPolicy: Boolean! + script: String! + variables: String + conditionName: String + suggestion: String +} + +type AddPolicyDefinitionPayload { + policyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + numUids: Int +} + +input AddPolicyEnforcementInput { + policy: PolicyDefinitionRef! + enforcedOrg: OrganizationRef + enforcedTeam: TeamRef + enforcedApplication: ApplicationRef + status: Boolean! + forceApply: Boolean + severity: Severity! + datasourceTool: String! + action: String! + conditionValue: String + environments: [EnvironmentRef!] + tags: [TagRef!] + createdAt: DateTime! + updatedAt: DateTime! +} + +type AddPolicyEnforcementPayload { + policyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + numUids: Int +} + +input AddRoleInput { + """id is randomly assigned""" + id: String! + + """group should be a URI format that includes a scope or realm""" + group: String! + permission: RolePermission! +} + +type AddRolePayload { + role(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + numUids: Int +} + +input AddRunHistoryInput { + policyId: String! + applicationDeployment: ApplicationDeploymentRef! + PolicyName: String! + Severity: Severity! + Stage: String! + Artifact: String! + ArtifactTag: String! + ArtifactSha: String! + ArtifactNameTag: String! + Status: String! + DatasourceTool: String! 
+ AlertTitle: String + AlertMessage: String + Suggestions: String + Reason: String + CreatedAt: DateTime! + UpdatedAt: DateTime! + DeployedAt: DateTime! + Action: String! + Hash: String + Error: String + Pass: Boolean! + MetaData: String + FileApi: String + JiraUrl: String + scheduledPolicy: Boolean! + policyEnforcements: PolicyEnforcementRef! +} + +type AddRunHistoryPayload { + runHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + numUids: Int +} + +input AddSchemaVersionInput { + version: String! +} + +type AddSchemaVersionPayload { + schemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + numUids: Int +} + +input AddSourceCodeToolInput { + """id is randomly assigned""" + id: String! + createdAt: DateTime! + + """scm is the scm tool github/gitlab etc""" + scm: String! + + """repository is the git remote repository""" + repository: String! + + """branch is the git branch on which the artifact was built""" + branch: String! + + """headCommit is the checkout out head commit""" + headCommit: String + + """ + diffCommits is a comma separated string of the commits between the previous built artifact and the current + """ + diffCommits: String + licenseName: String + visibility: String + + """parentRepo is populated in case the git repo is a fork""" + parentRepo: String + buildTool: BuildToolRef! +} + +type AddSourceCodeToolPayload { + sourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + numUids: Int +} + +input AddTagInput { + id: String! + tagName: String! + tagValue: String! + tagDescription: String + createdBy: String + createdAt: DateTime! + updatedAt: DateTime! + policies: [PolicyEnforcementRef!] +} + +type AddTagPayload { + tag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + numUids: Int +} + +input AddTeamInput { + """id is randomly assigned""" + id: String! 
+ name: String! + roles: [RoleRef!] + organization: OrganizationRef! + applications: [ApplicationRef!] + labels: [KeyValueRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] +} + +type AddTeamPayload { + team(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + numUids: Int +} + +input AddToolsUsedInput { + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +type AddToolsUsedPayload { + toolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + numUids: Int +} + +input AddVulnerabilityInput { + id: String! + parent: String! + ratings: Severity + cwes: [CWERef!] + summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: [CVSSRef!] + affects: [ComponentRef!] +} + +type AddVulnerabilityPayload { + vulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + numUids: Int +} + +type Application implements RBAC { + """id is randomly assigned""" + id: String! + name: String! + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + environments(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment!] + team(filter: TeamFilter): Team! + policies(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition!] + policyEnforcements(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] + metadata(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue!] 
+ rolesAggregate(filter: RoleFilter): RoleAggregateResult + environmentsAggregate(filter: ApplicationEnvironmentFilter): ApplicationEnvironmentAggregateResult + policiesAggregate(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + policyEnforcementsAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult + metadataAggregate(filter: KeyValueFilter): KeyValueAggregateResult +} + +type ApplicationAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String +} + +""" +ApplicationDeployment tells us about the the artifact deployed along with its associated details. +""" +type ApplicationDeployment { + """id is randomly assigned""" + id: String! + + """artifact that is deployed""" + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact!] + applicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironment! + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage! + + """source is argo, spinnaker etc""" + source: String! + + """component would be a service""" + component: String! + + """user who deployed the artifact""" + deployedBy: String + + """ + toolsUsed contains tools of different stages of source, build, artifact and deploy along with some different tools + """ + toolsUsed(filter: ToolsUsedFilter): ToolsUsed! + + """deploymentRisk is the risk status of the deployment""" + deploymentRisk(filter: ApplicationDeploymentRiskFilter): ApplicationDeploymentRisk + + """policyRunHistory is the policy execution history for this deployment""" + policyRunHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory!] 
+ artifactAggregate(filter: ArtifactFilter): ArtifactAggregateResult + policyRunHistoryAggregate(filter: RunHistoryFilter): RunHistoryAggregateResult +} + +type ApplicationDeploymentAggregateResult { + count: Int + idMin: String + idMax: String + deployedAtMin: DateTime + deployedAtMax: DateTime + sourceMin: String + sourceMax: String + componentMin: String + componentMax: String + deployedByMin: String + deployedByMax: String +} + +input ApplicationDeploymentFilter { + id: StringHashFilter + deployedAt: DateTimeFilter + deploymentStage: DeploymentStage_exact + component: StringExactFilter + has: [ApplicationDeploymentHasFilter] + and: [ApplicationDeploymentFilter] + or: [ApplicationDeploymentFilter] + not: ApplicationDeploymentFilter +} + +enum ApplicationDeploymentHasFilter { + id + artifact + applicationEnvironment + deployedAt + deploymentStage + source + component + deployedBy + toolsUsed + deploymentRisk + policyRunHistory +} + +input ApplicationDeploymentOrder { + asc: ApplicationDeploymentOrderable + desc: ApplicationDeploymentOrderable + then: ApplicationDeploymentOrder +} + +enum ApplicationDeploymentOrderable { + id + deployedAt + source + component + deployedBy +} + +input ApplicationDeploymentPatch { + artifact: [ArtifactRef!] + applicationEnvironment: ApplicationEnvironmentRef + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage + + """source is argo, spinnaker etc""" + source: String + + """component would be a service""" + component: String + + """user who deployed the artifact""" + deployedBy: String + toolsUsed: ToolsUsedRef + deploymentRisk: ApplicationDeploymentRiskRef + policyRunHistory: [RunHistoryRef!] +} + +input ApplicationDeploymentRef { + """id is randomly assigned""" + id: String + artifact: [ArtifactRef!] 
+ applicationEnvironment: ApplicationEnvironmentRef + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage + + """source is argo, spinnaker etc""" + source: String + + """component would be a service""" + component: String + + """user who deployed the artifact""" + deployedBy: String + toolsUsed: ToolsUsedRef + deploymentRisk: ApplicationDeploymentRiskRef + policyRunHistory: [RunHistoryRef!] +} + +""" +ApplicationDeploymentRisk tells us about the risk status and alerts for different stages for an application deployment. +""" +type ApplicationDeploymentRisk { + id: ID! + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment(filter: ApplicationDeploymentFilter): ApplicationDeployment! +} + +type ApplicationDeploymentRiskAggregateResult { + count: Int + sourceCodeAlertsScoreMin: Int + sourceCodeAlertsScoreMax: Int + sourceCodeAlertsScoreSum: Int + sourceCodeAlertsScoreAvg: Float + buildAlertsScoreMin: Int + buildAlertsScoreMax: Int + buildAlertsScoreSum: Int + buildAlertsScoreAvg: Float + artifactAlertsScoreMin: Int + artifactAlertsScoreMax: Int + artifactAlertsScoreSum: Int + artifactAlertsScoreAvg: Float + deploymentAlertsScoreMin: Int + deploymentAlertsScoreMax: Int + deploymentAlertsScoreSum: Int + deploymentAlertsScoreAvg: Float +} + +input ApplicationDeploymentRiskFilter { + id: [ID!] 
+ has: [ApplicationDeploymentRiskHasFilter] + and: [ApplicationDeploymentRiskFilter] + or: [ApplicationDeploymentRiskFilter] + not: ApplicationDeploymentRiskFilter +} + +enum ApplicationDeploymentRiskHasFilter { + sourceCodeAlertsScore + buildAlertsScore + artifactAlertsScore + deploymentAlertsScore + deploymentRiskStatus + applicationDeployment +} + +input ApplicationDeploymentRiskOrder { + asc: ApplicationDeploymentRiskOrderable + desc: ApplicationDeploymentRiskOrderable + then: ApplicationDeploymentRiskOrder +} + +enum ApplicationDeploymentRiskOrderable { + sourceCodeAlertsScore + buildAlertsScore + artifactAlertsScore + deploymentAlertsScore +} + +input ApplicationDeploymentRiskPatch { + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment: ApplicationDeploymentRef +} + +input ApplicationDeploymentRiskRef { + id: ID + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment: ApplicationDeploymentRef +} + +""" +ApplicationEnvironment is a running instance of an application down to the level of a namespace or its non k8s equivalent. +""" +type ApplicationEnvironment { + """id is randomly assigned""" + id: String! + + """environment denotes whether it is dev, prod, staging, non-prod etc""" + environment(filter: EnvironmentFilter): Environment + application(filter: ApplicationFilter): Application! + deploymentTarget(filter: DeploymentTargetFilter): DeploymentTarget! + namespace: String! + + """ + toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + """ + toolsUsed: String + deployments(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment!] 
+ riskStatus(filter: ApplicationRiskStatusFilter): ApplicationRiskStatus + metadata(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue!] + deploymentsAggregate(filter: ApplicationDeploymentFilter): ApplicationDeploymentAggregateResult + metadataAggregate(filter: KeyValueFilter): KeyValueAggregateResult +} + +type ApplicationEnvironmentAggregateResult { + count: Int + idMin: String + idMax: String + namespaceMin: String + namespaceMax: String + toolsUsedMin: String + toolsUsedMax: String +} + +input ApplicationEnvironmentFilter { + id: StringHashFilter + namespace: StringExactFilter + has: [ApplicationEnvironmentHasFilter] + and: [ApplicationEnvironmentFilter] + or: [ApplicationEnvironmentFilter] + not: ApplicationEnvironmentFilter +} + +enum ApplicationEnvironmentHasFilter { + id + environment + application + deploymentTarget + namespace + toolsUsed + deployments + riskStatus + metadata +} + +input ApplicationEnvironmentOrder { + asc: ApplicationEnvironmentOrderable + desc: ApplicationEnvironmentOrderable + then: ApplicationEnvironmentOrder +} + +enum ApplicationEnvironmentOrderable { + id + namespace + toolsUsed +} + +input ApplicationEnvironmentPatch { + environment: EnvironmentRef + application: ApplicationRef + deploymentTarget: DeploymentTargetRef + namespace: String + + """ + toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + """ + toolsUsed: String + deployments: [ApplicationDeploymentRef!] + riskStatus: ApplicationRiskStatusRef + metadata: [KeyValueRef!] +} + +input ApplicationEnvironmentRef { + """id is randomly assigned""" + id: String + environment: EnvironmentRef + application: ApplicationRef + deploymentTarget: DeploymentTargetRef + namespace: String + + """ + toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + """ + toolsUsed: String + deployments: [ApplicationDeploymentRef!] 
+ riskStatus: ApplicationRiskStatusRef + metadata: [KeyValueRef!] +} + +input ApplicationFilter { + id: StringHashFilter + name: StringExactFilter + has: [ApplicationHasFilter] + and: [ApplicationFilter] + or: [ApplicationFilter] + not: ApplicationFilter +} + +enum ApplicationHasFilter { + id + name + roles + environments + team + policies + policyEnforcements + metadata +} + +input ApplicationOrder { + asc: ApplicationOrderable + desc: ApplicationOrderable + then: ApplicationOrder +} + +enum ApplicationOrderable { + id + name +} + +input ApplicationPatch { + name: String + roles: [RoleRef!] + environments: [ApplicationEnvironmentRef!] + team: TeamRef + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + metadata: [KeyValueRef!] +} + +input ApplicationRef { + """id is randomly assigned""" + id: String + name: String + roles: [RoleRef!] + environments: [ApplicationEnvironmentRef!] + team: TeamRef + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + metadata: [KeyValueRef!] +} + +""" +ApplicationRiskStatus tells us about the risk status and alerts for different stages for an application environment. +""" +type ApplicationRiskStatus { + id: ID! + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime! + updatedAt: DateTime! + applicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironment! 
+} + +type ApplicationRiskStatusAggregateResult { + count: Int + sourceCodeAlertsMin: Int + sourceCodeAlertsMax: Int + sourceCodeAlertsSum: Int + sourceCodeAlertsAvg: Float + buildAlertsMin: Int + buildAlertsMax: Int + buildAlertsSum: Int + buildAlertsAvg: Float + artifactAlertsMin: Int + artifactAlertsMax: Int + artifactAlertsSum: Int + artifactAlertsAvg: Float + deploymentAlertsMin: Int + deploymentAlertsMax: Int + deploymentAlertsSum: Int + deploymentAlertsAvg: Float + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input ApplicationRiskStatusFilter { + id: [ID!] + riskStatus: RiskStatus_hash + has: [ApplicationRiskStatusHasFilter] + and: [ApplicationRiskStatusFilter] + or: [ApplicationRiskStatusFilter] + not: ApplicationRiskStatusFilter +} + +enum ApplicationRiskStatusHasFilter { + riskStatus + sourceCodeAlerts + buildAlerts + artifactAlerts + deploymentAlerts + createdAt + updatedAt + applicationEnvironment +} + +input ApplicationRiskStatusOrder { + asc: ApplicationRiskStatusOrderable + desc: ApplicationRiskStatusOrderable + then: ApplicationRiskStatusOrder +} + +enum ApplicationRiskStatusOrderable { + sourceCodeAlerts + buildAlerts + artifactAlerts + deploymentAlerts + createdAt + updatedAt +} + +input ApplicationRiskStatusPatch { + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime + updatedAt: DateTime + applicationEnvironment: ApplicationEnvironmentRef +} + +input ApplicationRiskStatusRef { + id: ID + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime + updatedAt: DateTime + applicationEnvironment: ApplicationEnvironmentRef +} + +type Artifact { + id: String! + artifactType: String! + artifactName: String! + artifactTag: String! + artifactSha: String! 
+ scanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData!] + scanDataAggregate(filter: ArtifactScanDataFilter): ArtifactScanDataAggregateResult +} + +type ArtifactAggregateResult { + count: Int + idMin: String + idMax: String + artifactTypeMin: String + artifactTypeMax: String + artifactNameMin: String + artifactNameMax: String + artifactTagMin: String + artifactTagMax: String + artifactShaMin: String + artifactShaMax: String +} + +input ArtifactFilter { + id: StringHashFilter + artifactType: StringExactFilter + artifactName: StringExactFilter + artifactTag: StringExactFilter + artifactSha: StringExactFilter + has: [ArtifactHasFilter] + and: [ArtifactFilter] + or: [ArtifactFilter] + not: ArtifactFilter +} + +enum ArtifactHasFilter { + id + artifactType + artifactName + artifactTag + artifactSha + scanData +} + +input ArtifactOrder { + asc: ArtifactOrderable + desc: ArtifactOrderable + then: ArtifactOrder +} + +enum ArtifactOrderable { + id + artifactType + artifactName + artifactTag + artifactSha +} + +input ArtifactPatch { + artifactType: String + artifactName: String + artifactTag: String + artifactSha: String + scanData: [ArtifactScanDataRef!] +} + +input ArtifactRef { + id: String + artifactType: String + artifactName: String + artifactTag: String + artifactSha: String + scanData: [ArtifactScanDataRef!] +} + +type ArtifactScanData { + id: String! + artifactSha: String! + tool: String! + artifactDetails(filter: ArtifactFilter): Artifact + lastScannedAt: DateTime + components(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component!] 
+ vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int + componentsAggregate(filter: ComponentFilter): ComponentAggregateResult +} + +type ArtifactScanDataAggregateResult { + count: Int + idMin: String + idMax: String + artifactShaMin: String + artifactShaMax: String + toolMin: String + toolMax: String + lastScannedAtMin: DateTime + lastScannedAtMax: DateTime + vulnCriticalCountMin: Int + vulnCriticalCountMax: Int + vulnCriticalCountSum: Int + vulnCriticalCountAvg: Float + vulnHighCountMin: Int + vulnHighCountMax: Int + vulnHighCountSum: Int + vulnHighCountAvg: Float + vulnMediumCountMin: Int + vulnMediumCountMax: Int + vulnMediumCountSum: Int + vulnMediumCountAvg: Float + vulnLowCountMin: Int + vulnLowCountMax: Int + vulnLowCountSum: Int + vulnLowCountAvg: Float + vulnInfoCountMin: Int + vulnInfoCountMax: Int + vulnInfoCountSum: Int + vulnInfoCountAvg: Float + vulnUnknownCountMin: Int + vulnUnknownCountMax: Int + vulnUnknownCountSum: Int + vulnUnknownCountAvg: Float + vulnNoneCountMin: Int + vulnNoneCountMax: Int + vulnNoneCountSum: Int + vulnNoneCountAvg: Float + vulnTotalCountMin: Int + vulnTotalCountMax: Int + vulnTotalCountSum: Int + vulnTotalCountAvg: Float +} + +input ArtifactScanDataFilter { + id: StringHashFilter + artifactSha: StringExactFilter + tool: StringExactFilter + vulnCriticalCount: IntFilter + vulnHighCount: IntFilter + vulnMediumCount: IntFilter + vulnLowCount: IntFilter + vulnInfoCount: IntFilter + vulnUnknownCount: IntFilter + vulnNoneCount: IntFilter + vulnTotalCount: IntFilter + has: [ArtifactScanDataHasFilter] + and: [ArtifactScanDataFilter] + or: [ArtifactScanDataFilter] + not: ArtifactScanDataFilter +} + +enum ArtifactScanDataHasFilter { + id + artifactSha + tool + artifactDetails + lastScannedAt + components + vulnCriticalCount + vulnHighCount + vulnMediumCount + vulnLowCount + vulnInfoCount + vulnUnknownCount + 
vulnNoneCount + vulnTotalCount +} + +input ArtifactScanDataOrder { + asc: ArtifactScanDataOrderable + desc: ArtifactScanDataOrderable + then: ArtifactScanDataOrder +} + +enum ArtifactScanDataOrderable { + id + artifactSha + tool + lastScannedAt + vulnCriticalCount + vulnHighCount + vulnMediumCount + vulnLowCount + vulnInfoCount + vulnUnknownCount + vulnNoneCount + vulnTotalCount +} + +input ArtifactScanDataPatch { + artifactSha: String + tool: String + artifactDetails: ArtifactRef + lastScannedAt: DateTime + components: [ComponentRef!] + vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int +} + +input ArtifactScanDataRef { + id: String + artifactSha: String + tool: String + artifactDetails: ArtifactRef + lastScannedAt: DateTime + components: [ComponentRef!] + vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int +} + +input AuthRule { + and: [AuthRule] + or: [AuthRule] + not: AuthRule + rule: String +} + +"""BuildTool contains data from build tool events.""" +type BuildTool { + """id is randomly assigned""" + id: String! + + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String! + + """tool is jenkins etc""" + tool: String! + + """buildName is the name of the job/pipeline/action""" + buildName: String! + buildUrl: String! + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String! + + """artifactTag would be the tag of the artifact""" + artifactTag: String! + + """digest is the sha of the artifact""" + digest: String! 
+ + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + + """artifactNode links a BuildTool node to an artifact""" + artifactNode(filter: ArtifactFilter): Artifact + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + + """sourceCodeTool links a BuildTool node to the source details""" + sourceCodeTool(filter: SourceCodeToolFilter): SourceCodeTool + + """commitMetaData links a BuildTool node to the git commit based details""" + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData!] + createdAt: DateTime! + commitMetaDataAggregate(filter: CommitMetaDataFilter): CommitMetaDataAggregateResult +} + +type BuildToolAggregateResult { + count: Int + idMin: String + idMax: String + buildIdMin: String + buildIdMax: String + toolMin: String + toolMax: String + buildNameMin: String + buildNameMax: String + buildUrlMin: String + buildUrlMax: String + artifactTypeMin: String + artifactTypeMax: String + artifactMin: String + artifactMax: String + artifactTagMin: String + artifactTagMax: String + digestMin: String + digestMax: String + buildDigestMin: String + buildDigestMax: String + buildTimeMin: DateTime + buildTimeMax: DateTime + buildUserMin: String + buildUserMax: String + createdAtMin: DateTime + createdAtMax: DateTime +} + +input BuildToolFilter { + id: StringHashFilter + buildId: StringExactFilter + tool: StringExactFilter + buildName: StringExactFilter + buildUrl: StringExactFilter + artifactType: StringExactFilter + artifact: StringExactFilter + artifactTag: StringExactFilter + digest: StringExactFilter + buildDigest: StringExactFilter + has: [BuildToolHasFilter] + and: [BuildToolFilter] + or: [BuildToolFilter] + not: BuildToolFilter +} + +enum BuildToolHasFilter { + id + buildId + tool + buildName + buildUrl + artifactType + artifact + artifactTag + digest 
+ buildDigest + artifactNode + buildTime + buildUser + sourceCodeTool + commitMetaData + createdAt +} + +input BuildToolOrder { + asc: BuildToolOrderable + desc: BuildToolOrderable + then: BuildToolOrder +} + +enum BuildToolOrderable { + id + buildId + tool + buildName + buildUrl + artifactType + artifact + artifactTag + digest + buildDigest + buildTime + buildUser + createdAt +} + +input BuildToolPatch { + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String + + """tool is jenkins etc""" + tool: String + + """buildName is the name of the job/pipeline/action""" + buildName: String + buildUrl: String + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String + + """artifactTag would be the tag of the artifact""" + artifactTag: String + + """digest is the sha of the artifact""" + digest: String + + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + artifactNode: ArtifactRef + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + sourceCodeTool: SourceCodeToolRef + commitMetaData: [CommitMetaDataRef!] 
+ createdAt: DateTime +} + +input BuildToolRef { + """id is randomly assigned""" + id: String + + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String + + """tool is jenkins etc""" + tool: String + + """buildName is the name of the job/pipeline/action""" + buildName: String + buildUrl: String + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String + + """artifactTag would be the tag of the artifact""" + artifactTag: String + + """digest is the sha of the artifact""" + digest: String + + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + artifactNode: ArtifactRef + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + sourceCodeTool: SourceCodeToolRef + commitMetaData: [CommitMetaDataRef!] + createdAt: DateTime +} + +""" +CommitMetaData contains the git commit related details of the source repository . +""" +type CommitMetaData { + """id is randomly assigned""" + id: ID! + + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool(filter: BuildToolFilter): BuildTool! +} + +type CommitMetaDataAggregateResult { + count: Int + commitMin: String + commitMax: String + repositoryMin: String + repositoryMax: String + noOfReviewersConfMin: Int + noOfReviewersConfMax: Int + noOfReviewersConfSum: Int + noOfReviewersConfAvg: Float +} + +input CommitMetaDataFilter { + id: [ID!] 
+ has: [CommitMetaDataHasFilter] + and: [CommitMetaDataFilter] + or: [CommitMetaDataFilter] + not: CommitMetaDataFilter +} + +enum CommitMetaDataHasFilter { + commit + repository + commitSign + noOfReviewersConf + reviewerList + approverList + buildTool +} + +input CommitMetaDataOrder { + asc: CommitMetaDataOrderable + desc: CommitMetaDataOrderable + then: CommitMetaDataOrder +} + +enum CommitMetaDataOrderable { + commit + repository + noOfReviewersConf +} + +input CommitMetaDataPatch { + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool: BuildToolRef +} + +input CommitMetaDataRef { + """id is randomly assigned""" + id: ID + + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool: BuildToolRef +} + +type Component { + id: String! + type: String! + name: String! + version: String! + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability!] + artifacts(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData!] 
+ vulnerabilitiesAggregate(filter: VulnerabilityFilter): VulnerabilityAggregateResult + artifactsAggregate(filter: ArtifactScanDataFilter): ArtifactScanDataAggregateResult +} + +type ComponentAggregateResult { + count: Int + idMin: String + idMax: String + typeMin: String + typeMax: String + nameMin: String + nameMax: String + versionMin: String + versionMax: String + purlMin: String + purlMax: String + cpeMin: String + cpeMax: String + scannedAtMin: DateTime + scannedAtMax: DateTime +} + +input ComponentFilter { + id: StringHashFilter + name: StringExactFilter + version: StringExactFilter + purl: StringExactFilter + cpe: StringExactFilter + has: [ComponentHasFilter] + and: [ComponentFilter] + or: [ComponentFilter] + not: ComponentFilter +} + +enum ComponentHasFilter { + id + type + name + version + licenses + purl + cpe + scannedAt + vulnerabilities + artifacts +} + +input ComponentOrder { + asc: ComponentOrderable + desc: ComponentOrderable + then: ComponentOrder +} + +enum ComponentOrderable { + id + type + name + version + purl + cpe + scannedAt +} + +input ComponentPatch { + type: String + name: String + version: String + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities: [VulnerabilityRef!] + artifacts: [ArtifactScanDataRef!] +} + +input ComponentRef { + id: String + type: String + name: String + version: String + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities: [VulnerabilityRef!] + artifacts: [ArtifactScanDataRef!] +} + +input ContainsFilter { + point: PointRef + polygon: PolygonRef +} + +type Credentials { + id: ID! + data: String! + integrator(filter: IntegratorFilter): Integrator! +} + +type CredentialsAggregateResult { + count: Int + dataMin: String + dataMax: String +} + +input CredentialsFilter { + id: [ID!] 
+ has: [CredentialsHasFilter] + and: [CredentialsFilter] + or: [CredentialsFilter] + not: CredentialsFilter +} + +enum CredentialsHasFilter { + data + integrator +} + +input CredentialsOrder { + asc: CredentialsOrderable + desc: CredentialsOrderable + then: CredentialsOrder +} + +enum CredentialsOrderable { + data +} + +input CredentialsPatch { + data: String + integrator: IntegratorRef +} + +input CredentialsRef { + id: ID + data: String + integrator: IntegratorRef +} + +input CustomHTTP { + url: String! + method: HTTPMethod! + body: String + graphql: String + mode: Mode + forwardHeaders: [String!] + secretHeaders: [String!] + introspectionHeaders: [String!] + skipIntrospection: Boolean +} + +type CVSS { + type: String + score: String +} + +type CVSSAggregateResult { + count: Int + typeMin: String + typeMax: String + scoreMin: String + scoreMax: String +} + +input CVSSFilter { + has: [CVSSHasFilter] + and: [CVSSFilter] + or: [CVSSFilter] + not: CVSSFilter +} + +enum CVSSHasFilter { + type + score +} + +input CVSSOrder { + asc: CVSSOrderable + desc: CVSSOrderable + then: CVSSOrder +} + +enum CVSSOrderable { + type + score +} + +input CVSSPatch { + type: String + score: String +} + +input CVSSRef { + type: String + score: String +} + +type CWE { + id: String! + name: String! 
+ description: String +} + +type CWEAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + descriptionMin: String + descriptionMax: String +} + +input CWEFilter { + id: StringHashFilter + has: [CWEHasFilter] + and: [CWEFilter] + or: [CWEFilter] + not: CWEFilter +} + +enum CWEHasFilter { + id + name + description +} + +input CWEOrder { + asc: CWEOrderable + desc: CWEOrderable + then: CWEOrder +} + +enum CWEOrderable { + id + name + description +} + +input CWEPatch { + name: String + description: String +} + +input CWERef { + id: String + name: String + description: String +} + +""" +The DateTime scalar type represents date and time as a string in RFC3339 format. +For example: "1985-04-12T23:20:50.52Z" represents 20 mins 50.52 secs after the 23rd hour of Apr 12th 1985 in UTC. +""" +scalar DateTime + +input DateTimeFilter { + eq: DateTime + in: [DateTime] + le: DateTime + lt: DateTime + ge: DateTime + gt: DateTime + between: DateTimeRange +} + +input DateTimeRange { + min: DateTime! + max: DateTime! 
+} + +type DeleteApplicationDeploymentPayload { + applicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + msg: String + numUids: Int +} + +type DeleteApplicationDeploymentRiskPayload { + applicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + msg: String + numUids: Int +} + +type DeleteApplicationEnvironmentPayload { + applicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + msg: String + numUids: Int +} + +type DeleteApplicationPayload { + application(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + msg: String + numUids: Int +} + +type DeleteApplicationRiskStatusPayload { + applicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + msg: String + numUids: Int +} + +type DeleteArtifactPayload { + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + msg: String + numUids: Int +} + +type DeleteArtifactScanDataPayload { + artifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + msg: String + numUids: Int +} + +type DeleteBuildToolPayload { + buildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + msg: String + numUids: Int +} + +type DeleteCommitMetaDataPayload { + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + msg: String + numUids: Int +} + +type DeleteComponentPayload { + component(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + msg: String + numUids: Int +} + +type DeleteCredentialsPayload { + 
credentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + msg: String + numUids: Int +} + +type DeleteCVSSPayload { + cVSS(filter: CVSSFilter, order: CVSSOrder, first: Int, offset: Int): [CVSS] + msg: String + numUids: Int +} + +type DeleteCWEPayload { + cWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + msg: String + numUids: Int +} + +type DeleteDeploymentTargetPayload { + deploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + msg: String + numUids: Int +} + +type DeleteEnvironmentPayload { + environment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + msg: String + numUids: Int +} + +type DeleteFeatureModePayload { + featureMode(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode] + msg: String + numUids: Int +} + +type DeleteIntegratorPayload { + integrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + msg: String + numUids: Int +} + +type DeleteKeyValuePayload { + keyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + msg: String + numUids: Int +} + +type DeleteOrganizationPayload { + organization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + msg: String + numUids: Int +} + +type DeletePolicyDefinitionPayload { + policyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + msg: String + numUids: Int +} + +type DeletePolicyEnforcementPayload { + policyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + msg: String + numUids: Int +} + +type DeleteRBACPayload { + rBAC(filter: RBACFilter, first: Int, offset: Int): [RBAC] + msg: String + numUids: Int +} + +type 
DeleteRolePayload { + role(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + msg: String + numUids: Int +} + +type DeleteRunHistoryPayload { + runHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + msg: String + numUids: Int +} + +type DeleteSchemaVersionPayload { + schemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + msg: String + numUids: Int +} + +type DeleteSourceCodeToolPayload { + sourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + msg: String + numUids: Int +} + +type DeleteTagPayload { + tag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + msg: String + numUids: Int +} + +type DeleteTeamPayload { + team(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + msg: String + numUids: Int +} + +type DeleteToolsUsedPayload { + toolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + msg: String + numUids: Int +} + +type DeleteVulnerabilityPayload { + vulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + msg: String + numUids: Int +} + +"""DeploymentStage is an enum denoting the stage of the deployment. 
.""" +enum DeploymentStage { + """deployment is discovered from the events""" + discovered + + """ + deployment is known to have passed the deployment firewall and the deployment(ie the artifact) is live + """ + current + + """ + deployment becomes a past deployment because another fresh deployment has happened + """ + previous + + """deployment is blocked by the firewall""" + blocked +} + +input DeploymentStage_exact { + eq: DeploymentStage + in: [DeploymentStage] + le: DeploymentStage + lt: DeploymentStage + ge: DeploymentStage + gt: DeploymentStage + between: DeploymentStage +} + +""" +DeploymentTarget describes a single place that things can be deployed into, +such as an AWS account or a Kubernetes cluster. +""" +type DeploymentTarget { + """id is randomly assigned""" + id: String! + name: String! + ip: String! + isFirewall: Boolean + organization(filter: OrganizationFilter): Organization! + defaultEnvironment(filter: EnvironmentFilter): Environment! +} + +type DeploymentTargetAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + ipMin: String + ipMax: String +} + +input DeploymentTargetFilter { + id: StringHashFilter + name: StringExactFilter + ip: StringExactFilter + has: [DeploymentTargetHasFilter] + and: [DeploymentTargetFilter] + or: [DeploymentTargetFilter] + not: DeploymentTargetFilter +} + +enum DeploymentTargetHasFilter { + id + name + ip + isFirewall + organization + defaultEnvironment +} + +input DeploymentTargetOrder { + asc: DeploymentTargetOrderable + desc: DeploymentTargetOrderable + then: DeploymentTargetOrder +} + +enum DeploymentTargetOrderable { + id + name + ip +} + +input DeploymentTargetPatch { + name: String + ip: String + isFirewall: Boolean + organization: OrganizationRef + defaultEnvironment: EnvironmentRef +} + +input DeploymentTargetRef { + """id is randomly assigned""" + id: String + name: String + ip: String + isFirewall: Boolean + organization: OrganizationRef + defaultEnvironment: 
EnvironmentRef +} + +enum DgraphIndex { + int + int64 + float + bool + hash + exact + term + fulltext + trigram + regexp + year + month + day + hour + geo +} + +"""Environment can be things like dev, prod, staging etc.""" +type Environment { + id: String! + organization(filter: OrganizationFilter): Organization! + purpose: String! +} + +type EnvironmentAggregateResult { + count: Int + idMin: String + idMax: String + purposeMin: String + purposeMax: String +} + +input EnvironmentFilter { + id: StringHashFilter + purpose: StringExactFilter + has: [EnvironmentHasFilter] + and: [EnvironmentFilter] + or: [EnvironmentFilter] + not: EnvironmentFilter +} + +enum EnvironmentHasFilter { + id + organization + purpose +} + +input EnvironmentOrder { + asc: EnvironmentOrderable + desc: EnvironmentOrderable + then: EnvironmentOrder +} + +enum EnvironmentOrderable { + id + purpose +} + +input EnvironmentPatch { + organization: OrganizationRef + purpose: String +} + +input EnvironmentRef { + id: String + organization: OrganizationRef + purpose: String +} + +type FeatureMode { + id: String! + organization(filter: OrganizationFilter): Organization! + scan: String! + type: String! + enabled: Boolean! + category: String! + createdAt: DateTime! + updatedAt: DateTime! 
+} + +type FeatureModeAggregateResult { + count: Int + idMin: String + idMax: String + scanMin: String + scanMax: String + typeMin: String + typeMax: String + categoryMin: String + categoryMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input FeatureModeFilter { + id: StringHashFilter + scan: StringExactFilter + type: StringExactFilter + category: StringExactFilter + has: [FeatureModeHasFilter] + and: [FeatureModeFilter] + or: [FeatureModeFilter] + not: FeatureModeFilter +} + +enum FeatureModeHasFilter { + id + organization + scan + type + enabled + category + createdAt + updatedAt +} + +input FeatureModeOrder { + asc: FeatureModeOrderable + desc: FeatureModeOrderable + then: FeatureModeOrder +} + +enum FeatureModeOrderable { + id + scan + type + category + createdAt + updatedAt +} + +input FeatureModePatch { + organization: OrganizationRef + scan: String + type: String + enabled: Boolean + category: String + createdAt: DateTime + updatedAt: DateTime +} + +input FeatureModeRef { + id: String + organization: OrganizationRef + scan: String + type: String + enabled: Boolean + category: String + createdAt: DateTime + updatedAt: DateTime +} + +input FloatFilter { + eq: Float + in: [Float] + le: Float + lt: Float + ge: Float + gt: Float + between: FloatRange +} + +input FloatRange { + min: Float! + max: Float! +} + +input GenerateMutationParams { + add: Boolean + update: Boolean + delete: Boolean +} + +input GenerateQueryParams { + get: Boolean + query: Boolean + password: Boolean + aggregate: Boolean +} + +enum HTTPMethod { + GET + POST + PUT + PATCH + DELETE +} + +""" +The Int64 scalar type represents a signed 64‐bit numeric non‐fractional value. +Int64 can represent values in range [-(2^63),(2^63 - 1)]. +""" +scalar Int64 + +input Int64Filter { + eq: Int64 + in: [Int64] + le: Int64 + lt: Int64 + ge: Int64 + gt: Int64 + between: Int64Range +} + +input Int64Range { + min: Int64! + max: Int64! 
+} + +type Integrator { + id: String! + organization(filter: OrganizationFilter): Organization! + name: String! + type: String! + category: String! + credentials(filter: CredentialsFilter): Credentials! + createdAt: DateTime! + updatedAt: DateTime! +} + +type IntegratorAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + typeMin: String + typeMax: String + categoryMin: String + categoryMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input IntegratorFilter { + id: StringHashFilter + name: StringExactFilter + type: StringExactFilter + category: StringExactFilter + has: [IntegratorHasFilter] + and: [IntegratorFilter] + or: [IntegratorFilter] + not: IntegratorFilter +} + +enum IntegratorHasFilter { + id + organization + name + type + category + credentials + createdAt + updatedAt +} + +input IntegratorOrder { + asc: IntegratorOrderable + desc: IntegratorOrderable + then: IntegratorOrder +} + +enum IntegratorOrderable { + id + name + type + category + createdAt + updatedAt +} + +input IntegratorPatch { + organization: OrganizationRef + name: String + type: String + category: String + credentials: CredentialsRef + createdAt: DateTime + updatedAt: DateTime +} + +input IntegratorRef { + id: String + organization: OrganizationRef + name: String + type: String + category: String + credentials: CredentialsRef + createdAt: DateTime + updatedAt: DateTime +} + +input IntersectsFilter { + polygon: PolygonRef + multiPolygon: MultiPolygonRef +} + +input IntFilter { + eq: Int + in: [Int] + le: Int + lt: Int + ge: Int + gt: Int + between: IntRange +} + +input IntRange { + min: Int! + max: Int! +} + +""" +KeyValue is a generic key/value pair, used as an attribute list or similar. +""" +type KeyValue { + id: String! + name: String! + value: String! 
+} + +type KeyValueAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + valueMin: String + valueMax: String +} + +input KeyValueFilter { + id: StringHashFilter + has: [KeyValueHasFilter] + and: [KeyValueFilter] + or: [KeyValueFilter] + not: KeyValueFilter +} + +enum KeyValueHasFilter { + id + name + value +} + +input KeyValueOrder { + asc: KeyValueOrderable + desc: KeyValueOrderable + then: KeyValueOrder +} + +enum KeyValueOrderable { + id + name + value +} + +input KeyValuePatch { + name: String + value: String +} + +input KeyValueRef { + id: String + name: String + value: String +} + +enum Mode { + BATCH + SINGLE +} + +type MultiPolygon { + polygons: [Polygon!]! +} + +input MultiPolygonRef { + polygons: [PolygonRef!]! +} + +type Mutation { + addSchemaVersion(input: [AddSchemaVersionInput!]!): AddSchemaVersionPayload + updateSchemaVersion(input: UpdateSchemaVersionInput!): UpdateSchemaVersionPayload + deleteSchemaVersion(filter: SchemaVersionFilter!): DeleteSchemaVersionPayload + updateRBAC(input: UpdateRBACInput!): UpdateRBACPayload + deleteRBAC(filter: RBACFilter!): DeleteRBACPayload + addRole(input: [AddRoleInput!]!, upsert: Boolean): AddRolePayload + updateRole(input: UpdateRoleInput!): UpdateRolePayload + deleteRole(filter: RoleFilter!): DeleteRolePayload + addKeyValue(input: [AddKeyValueInput!]!, upsert: Boolean): AddKeyValuePayload + updateKeyValue(input: UpdateKeyValueInput!): UpdateKeyValuePayload + deleteKeyValue(filter: KeyValueFilter!): DeleteKeyValuePayload + addOrganization(input: [AddOrganizationInput!]!, upsert: Boolean): AddOrganizationPayload + updateOrganization(input: UpdateOrganizationInput!): UpdateOrganizationPayload + deleteOrganization(filter: OrganizationFilter!): DeleteOrganizationPayload + addEnvironment(input: [AddEnvironmentInput!]!, upsert: Boolean): AddEnvironmentPayload + updateEnvironment(input: UpdateEnvironmentInput!): UpdateEnvironmentPayload + deleteEnvironment(filter: 
EnvironmentFilter!): DeleteEnvironmentPayload + addDeploymentTarget(input: [AddDeploymentTargetInput!]!, upsert: Boolean): AddDeploymentTargetPayload + updateDeploymentTarget(input: UpdateDeploymentTargetInput!): UpdateDeploymentTargetPayload + deleteDeploymentTarget(filter: DeploymentTargetFilter!): DeleteDeploymentTargetPayload + addTeam(input: [AddTeamInput!]!, upsert: Boolean): AddTeamPayload + updateTeam(input: UpdateTeamInput!): UpdateTeamPayload + deleteTeam(filter: TeamFilter!): DeleteTeamPayload + addApplication(input: [AddApplicationInput!]!, upsert: Boolean): AddApplicationPayload + updateApplication(input: UpdateApplicationInput!): UpdateApplicationPayload + deleteApplication(filter: ApplicationFilter!): DeleteApplicationPayload + addApplicationEnvironment(input: [AddApplicationEnvironmentInput!]!, upsert: Boolean): AddApplicationEnvironmentPayload + updateApplicationEnvironment(input: UpdateApplicationEnvironmentInput!): UpdateApplicationEnvironmentPayload + deleteApplicationEnvironment(filter: ApplicationEnvironmentFilter!): DeleteApplicationEnvironmentPayload + addApplicationRiskStatus(input: [AddApplicationRiskStatusInput!]!): AddApplicationRiskStatusPayload + updateApplicationRiskStatus(input: UpdateApplicationRiskStatusInput!): UpdateApplicationRiskStatusPayload + deleteApplicationRiskStatus(filter: ApplicationRiskStatusFilter!): DeleteApplicationRiskStatusPayload + addApplicationDeployment(input: [AddApplicationDeploymentInput!]!, upsert: Boolean): AddApplicationDeploymentPayload + updateApplicationDeployment(input: UpdateApplicationDeploymentInput!): UpdateApplicationDeploymentPayload + deleteApplicationDeployment(filter: ApplicationDeploymentFilter!): DeleteApplicationDeploymentPayload + addToolsUsed(input: [AddToolsUsedInput!]!): AddToolsUsedPayload + updateToolsUsed(input: UpdateToolsUsedInput!): UpdateToolsUsedPayload + deleteToolsUsed(filter: ToolsUsedFilter!): DeleteToolsUsedPayload + addApplicationDeploymentRisk(input: 
[AddApplicationDeploymentRiskInput!]!): AddApplicationDeploymentRiskPayload + updateApplicationDeploymentRisk(input: UpdateApplicationDeploymentRiskInput!): UpdateApplicationDeploymentRiskPayload + deleteApplicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter!): DeleteApplicationDeploymentRiskPayload + addIntegrator(input: [AddIntegratorInput!]!, upsert: Boolean): AddIntegratorPayload + updateIntegrator(input: UpdateIntegratorInput!): UpdateIntegratorPayload + deleteIntegrator(filter: IntegratorFilter!): DeleteIntegratorPayload + addCredentials(input: [AddCredentialsInput!]!): AddCredentialsPayload + updateCredentials(input: UpdateCredentialsInput!): UpdateCredentialsPayload + deleteCredentials(filter: CredentialsFilter!): DeleteCredentialsPayload + addFeatureMode(input: [AddFeatureModeInput!]!, upsert: Boolean): AddFeatureModePayload + updateFeatureMode(input: UpdateFeatureModeInput!): UpdateFeatureModePayload + deleteFeatureMode(filter: FeatureModeFilter!): DeleteFeatureModePayload + addTag(input: [AddTagInput!]!, upsert: Boolean): AddTagPayload + updateTag(input: UpdateTagInput!): UpdateTagPayload + deleteTag(filter: TagFilter!): DeleteTagPayload + addPolicyDefinition(input: [AddPolicyDefinitionInput!]!, upsert: Boolean): AddPolicyDefinitionPayload + updatePolicyDefinition(input: UpdatePolicyDefinitionInput!): UpdatePolicyDefinitionPayload + deletePolicyDefinition(filter: PolicyDefinitionFilter!): DeletePolicyDefinitionPayload + addPolicyEnforcement(input: [AddPolicyEnforcementInput!]!): AddPolicyEnforcementPayload + updatePolicyEnforcement(input: UpdatePolicyEnforcementInput!): UpdatePolicyEnforcementPayload + deletePolicyEnforcement(filter: PolicyEnforcementFilter!): DeletePolicyEnforcementPayload + addRunHistory(input: [AddRunHistoryInput!]!): AddRunHistoryPayload + updateRunHistory(input: UpdateRunHistoryInput!): UpdateRunHistoryPayload + deleteRunHistory(filter: RunHistoryFilter!): DeleteRunHistoryPayload + addBuildTool(input: 
[AddBuildToolInput!]!, upsert: Boolean): AddBuildToolPayload + updateBuildTool(input: UpdateBuildToolInput!): UpdateBuildToolPayload + deleteBuildTool(filter: BuildToolFilter!): DeleteBuildToolPayload + addSourceCodeTool(input: [AddSourceCodeToolInput!]!, upsert: Boolean): AddSourceCodeToolPayload + updateSourceCodeTool(input: UpdateSourceCodeToolInput!): UpdateSourceCodeToolPayload + deleteSourceCodeTool(filter: SourceCodeToolFilter!): DeleteSourceCodeToolPayload + addCommitMetaData(input: [AddCommitMetaDataInput!]!): AddCommitMetaDataPayload + updateCommitMetaData(input: UpdateCommitMetaDataInput!): UpdateCommitMetaDataPayload + deleteCommitMetaData(filter: CommitMetaDataFilter!): DeleteCommitMetaDataPayload + addArtifact(input: [AddArtifactInput!]!, upsert: Boolean): AddArtifactPayload + updateArtifact(input: UpdateArtifactInput!): UpdateArtifactPayload + deleteArtifact(filter: ArtifactFilter!): DeleteArtifactPayload + addArtifactScanData(input: [AddArtifactScanDataInput!]!, upsert: Boolean): AddArtifactScanDataPayload + updateArtifactScanData(input: UpdateArtifactScanDataInput!): UpdateArtifactScanDataPayload + deleteArtifactScanData(filter: ArtifactScanDataFilter!): DeleteArtifactScanDataPayload + addComponent(input: [AddComponentInput!]!, upsert: Boolean): AddComponentPayload + updateComponent(input: UpdateComponentInput!): UpdateComponentPayload + deleteComponent(filter: ComponentFilter!): DeleteComponentPayload + addVulnerability(input: [AddVulnerabilityInput!]!, upsert: Boolean): AddVulnerabilityPayload + updateVulnerability(input: UpdateVulnerabilityInput!): UpdateVulnerabilityPayload + deleteVulnerability(filter: VulnerabilityFilter!): DeleteVulnerabilityPayload + addCWE(input: [AddCWEInput!]!, upsert: Boolean): AddCWEPayload + updateCWE(input: UpdateCWEInput!): UpdateCWEPayload + deleteCWE(filter: CWEFilter!): DeleteCWEPayload + addCVSS(input: [AddCVSSInput!]!): AddCVSSPayload + updateCVSS(input: UpdateCVSSInput!): UpdateCVSSPayload + deleteCVSS(filter: 
CVSSFilter!): DeleteCVSSPayload +} + +input NearFilter { + distance: Float! + coordinate: PointRef! +} + +type Organization implements RBAC { + """id is randomly assigned""" + id: String! + name: String! + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + teams(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team!] + environments(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget!] + policies(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition!] + policyEnforcements(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] + integrators(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator!] + featureModes(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode!] + rolesAggregate(filter: RoleFilter): RoleAggregateResult + teamsAggregate(filter: TeamFilter): TeamAggregateResult + environmentsAggregate(filter: DeploymentTargetFilter): DeploymentTargetAggregateResult + policiesAggregate(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + policyEnforcementsAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult + integratorsAggregate(filter: IntegratorFilter): IntegratorAggregateResult + featureModesAggregate(filter: FeatureModeFilter): FeatureModeAggregateResult +} + +type OrganizationAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String +} + +input OrganizationFilter { + id: StringHashFilter + name: StringExactFilter + has: [OrganizationHasFilter] + and: [OrganizationFilter] + or: [OrganizationFilter] + not: OrganizationFilter +} + +enum OrganizationHasFilter { + id + name + roles + teams + environments + policies + policyEnforcements + integrators + featureModes +} + +input OrganizationOrder { + asc: 
OrganizationOrderable + desc: OrganizationOrderable + then: OrganizationOrder +} + +enum OrganizationOrderable { + id + name +} + +input OrganizationPatch { + name: String + roles: [RoleRef!] + teams: [TeamRef!] + environments: [DeploymentTargetRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + integrators: [IntegratorRef!] + featureModes: [FeatureModeRef!] +} + +input OrganizationRef { + """id is randomly assigned""" + id: String + name: String + roles: [RoleRef!] + teams: [TeamRef!] + environments: [DeploymentTargetRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + integrators: [IntegratorRef!] + featureModes: [FeatureModeRef!] +} + +type Point { + longitude: Float! + latitude: Float! +} + +input PointGeoFilter { + near: NearFilter + within: WithinFilter +} + +type PointList { + points: [Point!]! +} + +input PointListRef { + points: [PointRef!]! +} + +input PointRef { + longitude: Float! + latitude: Float! +} + +type PolicyDefinition { + id: String! + ownerOrg(filter: OrganizationFilter): Organization! + ownerTeam(filter: TeamFilter): Team + ownerApplication(filter: ApplicationFilter): Application + createdAt: DateTime! + updatedAt: DateTime! + policyName: String! + category: String! + stage: String! + description: String! + scheduledPolicy: Boolean! + script: String! 
+ variables: String + conditionName: String + suggestion: String +} + +type PolicyDefinitionAggregateResult { + count: Int + idMin: String + idMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime + policyNameMin: String + policyNameMax: String + categoryMin: String + categoryMax: String + stageMin: String + stageMax: String + descriptionMin: String + descriptionMax: String + scriptMin: String + scriptMax: String + variablesMin: String + variablesMax: String + conditionNameMin: String + conditionNameMax: String + suggestionMin: String + suggestionMax: String +} + +input PolicyDefinitionFilter { + id: StringHashFilter + policyName: StringExactFilter + category: StringExactFilter + stage: StringExactFilter + has: [PolicyDefinitionHasFilter] + and: [PolicyDefinitionFilter] + or: [PolicyDefinitionFilter] + not: PolicyDefinitionFilter +} + +enum PolicyDefinitionHasFilter { + id + ownerOrg + ownerTeam + ownerApplication + createdAt + updatedAt + policyName + category + stage + description + scheduledPolicy + script + variables + conditionName + suggestion +} + +input PolicyDefinitionOrder { + asc: PolicyDefinitionOrderable + desc: PolicyDefinitionOrderable + then: PolicyDefinitionOrder +} + +enum PolicyDefinitionOrderable { + id + createdAt + updatedAt + policyName + category + stage + description + script + variables + conditionName + suggestion +} + +input PolicyDefinitionPatch { + ownerOrg: OrganizationRef + ownerTeam: TeamRef + ownerApplication: ApplicationRef + createdAt: DateTime + updatedAt: DateTime + policyName: String + category: String + stage: String + description: String + scheduledPolicy: Boolean + script: String + variables: String + conditionName: String + suggestion: String +} + +input PolicyDefinitionRef { + id: String + ownerOrg: OrganizationRef + ownerTeam: TeamRef + ownerApplication: ApplicationRef + createdAt: DateTime + updatedAt: DateTime + policyName: String + category: String + stage: 
String + description: String + scheduledPolicy: Boolean + script: String + variables: String + conditionName: String + suggestion: String +} + +type PolicyEnforcement { + id: ID! + policy(filter: PolicyDefinitionFilter): PolicyDefinition! + enforcedOrg(filter: OrganizationFilter): Organization + enforcedTeam(filter: TeamFilter): Team + enforcedApplication(filter: ApplicationFilter): Application + status: Boolean! + forceApply: Boolean + severity: Severity! + datasourceTool: String! + action: String! + conditionValue: String + environments(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment!] + tags(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag!] + createdAt: DateTime! + updatedAt: DateTime! + environmentsAggregate(filter: EnvironmentFilter): EnvironmentAggregateResult + tagsAggregate(filter: TagFilter): TagAggregateResult +} + +type PolicyEnforcementAggregateResult { + count: Int + datasourceToolMin: String + datasourceToolMax: String + actionMin: String + actionMax: String + conditionValueMin: String + conditionValueMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input PolicyEnforcementFilter { + id: [ID!] 
+ status: Boolean + datasourceTool: StringExactFilter + action: StringExactFilter + has: [PolicyEnforcementHasFilter] + and: [PolicyEnforcementFilter] + or: [PolicyEnforcementFilter] + not: PolicyEnforcementFilter +} + +enum PolicyEnforcementHasFilter { + policy + enforcedOrg + enforcedTeam + enforcedApplication + status + forceApply + severity + datasourceTool + action + conditionValue + environments + tags + createdAt + updatedAt +} + +input PolicyEnforcementOrder { + asc: PolicyEnforcementOrderable + desc: PolicyEnforcementOrderable + then: PolicyEnforcementOrder +} + +enum PolicyEnforcementOrderable { + datasourceTool + action + conditionValue + createdAt + updatedAt +} + +input PolicyEnforcementPatch { + policy: PolicyDefinitionRef + enforcedOrg: OrganizationRef + enforcedTeam: TeamRef + enforcedApplication: ApplicationRef + status: Boolean + forceApply: Boolean + severity: Severity + datasourceTool: String + action: String + conditionValue: String + environments: [EnvironmentRef!] + tags: [TagRef!] + createdAt: DateTime + updatedAt: DateTime +} + +input PolicyEnforcementRef { + id: ID + policy: PolicyDefinitionRef + enforcedOrg: OrganizationRef + enforcedTeam: TeamRef + enforcedApplication: ApplicationRef + status: Boolean + forceApply: Boolean + severity: Severity + datasourceTool: String + action: String + conditionValue: String + environments: [EnvironmentRef!] + tags: [TagRef!] + createdAt: DateTime + updatedAt: DateTime +} + +type Polygon { + coordinates: [PointList!]! +} + +input PolygonGeoFilter { + near: NearFilter + within: WithinFilter + contains: ContainsFilter + intersects: IntersectsFilter +} + +input PolygonRef { + coordinates: [PointListRef!]! 
+} + +type Query { + querySchemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + aggregateSchemaVersion(filter: SchemaVersionFilter): SchemaVersionAggregateResult + queryRBAC(filter: RBACFilter, first: Int, offset: Int): [RBAC] + aggregateRBAC(filter: RBACFilter): RBACAggregateResult + getRole(id: String!): Role + queryRole(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + aggregateRole(filter: RoleFilter): RoleAggregateResult + getKeyValue(id: String!): KeyValue + queryKeyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + aggregateKeyValue(filter: KeyValueFilter): KeyValueAggregateResult + getOrganization(id: String!): Organization + queryOrganization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + aggregateOrganization(filter: OrganizationFilter): OrganizationAggregateResult + getEnvironment(id: String!): Environment + queryEnvironment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + aggregateEnvironment(filter: EnvironmentFilter): EnvironmentAggregateResult + getDeploymentTarget(id: String!): DeploymentTarget + queryDeploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + aggregateDeploymentTarget(filter: DeploymentTargetFilter): DeploymentTargetAggregateResult + getTeam(id: String!): Team + queryTeam(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + aggregateTeam(filter: TeamFilter): TeamAggregateResult + getApplication(id: String!): Application + queryApplication(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + aggregateApplication(filter: ApplicationFilter): ApplicationAggregateResult + getApplicationEnvironment(id: String!): ApplicationEnvironment + queryApplicationEnvironment(filter: 
ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + aggregateApplicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironmentAggregateResult + getApplicationRiskStatus(id: ID!): ApplicationRiskStatus + queryApplicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + aggregateApplicationRiskStatus(filter: ApplicationRiskStatusFilter): ApplicationRiskStatusAggregateResult + getApplicationDeployment(id: String!): ApplicationDeployment + queryApplicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + aggregateApplicationDeployment(filter: ApplicationDeploymentFilter): ApplicationDeploymentAggregateResult + getToolsUsed(id: ID!): ToolsUsed + queryToolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + aggregateToolsUsed(filter: ToolsUsedFilter): ToolsUsedAggregateResult + getApplicationDeploymentRisk(id: ID!): ApplicationDeploymentRisk + queryApplicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + aggregateApplicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter): ApplicationDeploymentRiskAggregateResult + getIntegrator(id: String!): Integrator + queryIntegrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + aggregateIntegrator(filter: IntegratorFilter): IntegratorAggregateResult + getCredentials(id: ID!): Credentials + queryCredentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + aggregateCredentials(filter: CredentialsFilter): CredentialsAggregateResult + getFeatureMode(id: String!): FeatureMode + queryFeatureMode(filter: FeatureModeFilter, order: FeatureModeOrder, 
first: Int, offset: Int): [FeatureMode] + aggregateFeatureMode(filter: FeatureModeFilter): FeatureModeAggregateResult + getTag(id: String!): Tag + queryTag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + aggregateTag(filter: TagFilter): TagAggregateResult + getPolicyDefinition(id: String!): PolicyDefinition + queryPolicyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + aggregatePolicyDefinition(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + getPolicyEnforcement(id: ID!): PolicyEnforcement + queryPolicyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + aggregatePolicyEnforcement(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult + getRunHistory(id: ID!): RunHistory + queryRunHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + aggregateRunHistory(filter: RunHistoryFilter): RunHistoryAggregateResult + getBuildTool(id: String!): BuildTool + queryBuildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + aggregateBuildTool(filter: BuildToolFilter): BuildToolAggregateResult + getSourceCodeTool(id: String!): SourceCodeTool + querySourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + aggregateSourceCodeTool(filter: SourceCodeToolFilter): SourceCodeToolAggregateResult + getCommitMetaData(id: ID!): CommitMetaData + queryCommitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + aggregateCommitMetaData(filter: CommitMetaDataFilter): CommitMetaDataAggregateResult + getArtifact(id: String!): Artifact + queryArtifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + aggregateArtifact(filter: ArtifactFilter): ArtifactAggregateResult + 
getArtifactScanData(id: String!): ArtifactScanData + queryArtifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + aggregateArtifactScanData(filter: ArtifactScanDataFilter): ArtifactScanDataAggregateResult + getComponent(id: String!): Component + queryComponent(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + aggregateComponent(filter: ComponentFilter): ComponentAggregateResult + getVulnerability(id: String!): Vulnerability + queryVulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + aggregateVulnerability(filter: VulnerabilityFilter): VulnerabilityAggregateResult + getCWE(id: String!): CWE + queryCWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + aggregateCWE(filter: CWEFilter): CWEAggregateResult + queryCVSS(filter: CVSSFilter, order: CVSSOrder, first: Int, offset: Int): [CVSS] + aggregateCVSS(filter: CVSSFilter): CVSSAggregateResult +} + +interface RBAC { + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + rolesAggregate(filter: RoleFilter): RoleAggregateResult +} + +type RBACAggregateResult { + count: Int +} + +input RBACFilter { + has: [RBACHasFilter] + and: [RBACFilter] + or: [RBACFilter] + not: RBACFilter +} + +enum RBACHasFilter { + roles +} + +input RBACPatch { + roles: [RoleRef!] +} + +""" +RiskStatus tells us what risk a current application instance or a deployment is at. +""" +enum RiskStatus { + lowrisk + mediumrisk + highrisk + apocalypserisk + inprogress +} + +input RiskStatus_hash { + eq: RiskStatus + in: [RiskStatus] +} + +type Role { + """id is randomly assigned""" + id: String! + + """group should be a URI format that includes a scope or realm""" + group: String! + permission: RolePermission! 
+} + +type RoleAggregateResult { + count: Int + idMin: String + idMax: String + groupMin: String + groupMax: String +} + +input RoleFilter { + id: StringHashFilter + group: StringHashFilter + permission: RolePermission_hash + has: [RoleHasFilter] + and: [RoleFilter] + or: [RoleFilter] + not: RoleFilter +} + +enum RoleHasFilter { + id + group + permission +} + +input RoleOrder { + asc: RoleOrderable + desc: RoleOrderable + then: RoleOrder +} + +enum RoleOrderable { + id + group +} + +input RolePatch { + """group should be a URI format that includes a scope or realm""" + group: String + permission: RolePermission +} + +enum RolePermission { + admin + write + read +} + +input RolePermission_hash { + eq: RolePermission + in: [RolePermission] +} + +input RoleRef { + """id is randomly assigned""" + id: String + + """group should be a URI format that includes a scope or realm""" + group: String + permission: RolePermission +} + +type RunHistory { + id: ID! + policyId: String! + applicationDeployment(filter: ApplicationDeploymentFilter): ApplicationDeployment! + PolicyName: String! + Severity: Severity! + Stage: String! + Artifact: String! + ArtifactTag: String! + ArtifactSha: String! + ArtifactNameTag: String! + Status: String! + DatasourceTool: String! + AlertTitle: String + AlertMessage: String + Suggestions: String + Reason: String + CreatedAt: DateTime! + UpdatedAt: DateTime! + DeployedAt: DateTime! + Action: String! + Hash: String + Error: String + Pass: Boolean! + MetaData: String + FileApi: String + JiraUrl: String + scheduledPolicy: Boolean! + policyEnforcements(filter: PolicyEnforcementFilter): PolicyEnforcement! 
+} + +type RunHistoryAggregateResult { + count: Int + policyIdMin: String + policyIdMax: String + PolicyNameMin: String + PolicyNameMax: String + StageMin: String + StageMax: String + ArtifactMin: String + ArtifactMax: String + ArtifactTagMin: String + ArtifactTagMax: String + ArtifactShaMin: String + ArtifactShaMax: String + ArtifactNameTagMin: String + ArtifactNameTagMax: String + StatusMin: String + StatusMax: String + DatasourceToolMin: String + DatasourceToolMax: String + AlertTitleMin: String + AlertTitleMax: String + AlertMessageMin: String + AlertMessageMax: String + SuggestionsMin: String + SuggestionsMax: String + ReasonMin: String + ReasonMax: String + CreatedAtMin: DateTime + CreatedAtMax: DateTime + UpdatedAtMin: DateTime + UpdatedAtMax: DateTime + DeployedAtMin: DateTime + DeployedAtMax: DateTime + ActionMin: String + ActionMax: String + HashMin: String + HashMax: String + ErrorMin: String + ErrorMax: String + MetaDataMin: String + MetaDataMax: String + FileApiMin: String + FileApiMax: String + JiraUrlMin: String + JiraUrlMax: String +} + +input RunHistoryFilter { + id: [ID!] 
+ policyId: StringExactFilter + PolicyName: StringExactFilter + Severity: Severity_exact + Stage: StringExactFilter + Artifact: StringExactFilter + ArtifactTag: StringExactFilter + ArtifactSha: StringExactFilter + ArtifactNameTag: StringExactFilter + AlertTitle: StringExactFilter + AlertMessage: StringExactFilter + Suggestions: StringExactFilter + CreatedAt: DateTimeFilter + UpdatedAt: DateTimeFilter + DeployedAt: DateTimeFilter + Action: StringExactFilter + Error: StringExactFilter + Pass: Boolean + scheduledPolicy: Boolean + has: [RunHistoryHasFilter] + and: [RunHistoryFilter] + or: [RunHistoryFilter] + not: RunHistoryFilter +} + +enum RunHistoryHasFilter { + policyId + applicationDeployment + PolicyName + Severity + Stage + Artifact + ArtifactTag + ArtifactSha + ArtifactNameTag + Status + DatasourceTool + AlertTitle + AlertMessage + Suggestions + Reason + CreatedAt + UpdatedAt + DeployedAt + Action + Hash + Error + Pass + MetaData + FileApi + JiraUrl + scheduledPolicy + policyEnforcements +} + +input RunHistoryOrder { + asc: RunHistoryOrderable + desc: RunHistoryOrderable + then: RunHistoryOrder +} + +enum RunHistoryOrderable { + policyId + PolicyName + Stage + Artifact + ArtifactTag + ArtifactSha + ArtifactNameTag + Status + DatasourceTool + AlertTitle + AlertMessage + Suggestions + Reason + CreatedAt + UpdatedAt + DeployedAt + Action + Hash + Error + MetaData + FileApi + JiraUrl +} + +input RunHistoryPatch { + policyId: String + applicationDeployment: ApplicationDeploymentRef + PolicyName: String + Severity: Severity + Stage: String + Artifact: String + ArtifactTag: String + ArtifactSha: String + ArtifactNameTag: String + Status: String + DatasourceTool: String + AlertTitle: String + AlertMessage: String + Suggestions: String + Reason: String + CreatedAt: DateTime + UpdatedAt: DateTime + DeployedAt: DateTime + Action: String + Hash: String + Error: String + Pass: Boolean + MetaData: String + FileApi: String + JiraUrl: String + scheduledPolicy: Boolean + 
policyEnforcements: PolicyEnforcementRef +} + +input RunHistoryRef { + id: ID + policyId: String + applicationDeployment: ApplicationDeploymentRef + PolicyName: String + Severity: Severity + Stage: String + Artifact: String + ArtifactTag: String + ArtifactSha: String + ArtifactNameTag: String + Status: String + DatasourceTool: String + AlertTitle: String + AlertMessage: String + Suggestions: String + Reason: String + CreatedAt: DateTime + UpdatedAt: DateTime + DeployedAt: DateTime + Action: String + Hash: String + Error: String + Pass: Boolean + MetaData: String + FileApi: String + JiraUrl: String + scheduledPolicy: Boolean + policyEnforcements: PolicyEnforcementRef +} + +type SchemaVersion { + version: String! +} + +type SchemaVersionAggregateResult { + count: Int + versionMin: String + versionMax: String +} + +input SchemaVersionFilter { + has: [SchemaVersionHasFilter] + and: [SchemaVersionFilter] + or: [SchemaVersionFilter] + not: SchemaVersionFilter +} + +enum SchemaVersionHasFilter { + version +} + +input SchemaVersionOrder { + asc: SchemaVersionOrderable + desc: SchemaVersionOrderable + then: SchemaVersionOrder +} + +enum SchemaVersionOrderable { + version +} + +input SchemaVersionPatch { + version: String +} + +input SchemaVersionRef { + version: String +} + +enum Severity { + critical + high + medium + low + info + none + unknown +} + +input Severity_exact { + eq: Severity + in: [Severity] + le: Severity + lt: Severity + ge: Severity + gt: Severity + between: Severity +} + +""" +SourceCodeTool contains the source details about the artifact that was built. +""" +type SourceCodeTool { + """id is randomly assigned""" + id: String! + createdAt: DateTime! + + """scm is the scm tool github/gitlab etc""" + scm: String! + + """repository is the git remote repository""" + repository: String! + + """branch is the git branch on which the artifact was built""" + branch: String! 
+ + """headCommit is the checkout out head commit""" + headCommit: String + + """ + diffCommits is a comma separated string of the commits between the previous built artifact and the current + """ + diffCommits: String + licenseName: String + visibility: String + + """parentRepo is populated in case the git repo is a fork""" + parentRepo: String + buildTool(filter: BuildToolFilter): BuildTool! +} + +type SourceCodeToolAggregateResult { + count: Int + idMin: String + idMax: String + createdAtMin: DateTime + createdAtMax: DateTime + scmMin: String + scmMax: String + repositoryMin: String + repositoryMax: String + branchMin: String + branchMax: String + headCommitMin: String + headCommitMax: String + diffCommitsMin: String + diffCommitsMax: String + licenseNameMin: String + licenseNameMax: String + visibilityMin: String + visibilityMax: String + parentRepoMin: String + parentRepoMax: String +} + +input SourceCodeToolFilter { + id: StringHashFilter + has: [SourceCodeToolHasFilter] + and: [SourceCodeToolFilter] + or: [SourceCodeToolFilter] + not: SourceCodeToolFilter +} + +enum SourceCodeToolHasFilter { + id + createdAt + scm + repository + branch + headCommit + diffCommits + licenseName + visibility + parentRepo + buildTool +} + +input SourceCodeToolOrder { + asc: SourceCodeToolOrderable + desc: SourceCodeToolOrderable + then: SourceCodeToolOrder +} + +enum SourceCodeToolOrderable { + id + createdAt + scm + repository + branch + headCommit + diffCommits + licenseName + visibility + parentRepo +} + +input SourceCodeToolPatch { + createdAt: DateTime + + """scm is the scm tool github/gitlab etc""" + scm: String + + """repository is the git remote repository""" + repository: String + + """branch is the git branch on which the artifact was built""" + branch: String + + """headCommit is the checkout out head commit""" + headCommit: String + + """ + diffCommits is a comma separated string of the commits between the previous built artifact and the current + """ + diffCommits: 
String + licenseName: String + visibility: String + + """parentRepo is populated in case the git repo is a fork""" + parentRepo: String + buildTool: BuildToolRef +} + +input SourceCodeToolRef { + """id is randomly assigned""" + id: String + createdAt: DateTime + + """scm is the scm tool github/gitlab etc""" + scm: String + + """repository is the git remote repository""" + repository: String + + """branch is the git branch on which the artifact was built""" + branch: String + + """headCommit is the checkout out head commit""" + headCommit: String + + """ + diffCommits is a comma separated string of the commits between the previous built artifact and the current + """ + diffCommits: String + licenseName: String + visibility: String + + """parentRepo is populated in case the git repo is a fork""" + parentRepo: String + buildTool: BuildToolRef +} + +input StringExactFilter { + eq: String + in: [String] + le: String + lt: String + ge: String + gt: String + between: StringRange +} + +input StringFullTextFilter { + alloftext: String + anyoftext: String +} + +input StringHashFilter { + eq: String + in: [String] +} + +input StringRange { + min: String! + max: String! 
+} + +input StringRegExpFilter { + regexp: String +} + +input StringTermFilter { + allofterms: String + anyofterms: String +} + +type Subscription { + getOrganization(id: String!): Organization + queryOrganization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + aggregateOrganization(filter: OrganizationFilter): OrganizationAggregateResult + getTeam(id: String!): Team + queryTeam(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + aggregateTeam(filter: TeamFilter): TeamAggregateResult + getApplication(id: String!): Application + queryApplication(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + aggregateApplication(filter: ApplicationFilter): ApplicationAggregateResult + getApplicationEnvironment(id: String!): ApplicationEnvironment + queryApplicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + aggregateApplicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironmentAggregateResult +} + +""" +Tag tells us about the tags that are linked to policies and other components. +""" +type Tag { + id: String! + tagName: String! + tagValue: String! + tagDescription: String + createdBy: String + createdAt: DateTime! + updatedAt: DateTime! + policies(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] 
+ policiesAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult +} + +type TagAggregateResult { + count: Int + idMin: String + idMax: String + tagNameMin: String + tagNameMax: String + tagValueMin: String + tagValueMax: String + tagDescriptionMin: String + tagDescriptionMax: String + createdByMin: String + createdByMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input TagFilter { + id: StringExactFilter + tagName: StringExactFilter + tagValue: StringExactFilter + createdBy: StringExactFilter + has: [TagHasFilter] + and: [TagFilter] + or: [TagFilter] + not: TagFilter +} + +enum TagHasFilter { + id + tagName + tagValue + tagDescription + createdBy + createdAt + updatedAt + policies +} + +input TagOrder { + asc: TagOrderable + desc: TagOrderable + then: TagOrder +} + +enum TagOrderable { + id + tagName + tagValue + tagDescription + createdBy + createdAt + updatedAt +} + +input TagPatch { + tagName: String + tagValue: String + tagDescription: String + createdBy: String + createdAt: DateTime + updatedAt: DateTime + policies: [PolicyEnforcementRef!] +} + +input TagRef { + id: String + tagName: String + tagValue: String + tagDescription: String + createdBy: String + createdAt: DateTime + updatedAt: DateTime + policies: [PolicyEnforcementRef!] +} + +type Team implements RBAC { + """id is randomly assigned""" + id: String! + name: String! + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + organization(filter: OrganizationFilter): Organization! + applications(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application!] + labels(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue!] + policies(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition!] 
+ policyEnforcements(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] + rolesAggregate(filter: RoleFilter): RoleAggregateResult + applicationsAggregate(filter: ApplicationFilter): ApplicationAggregateResult + labelsAggregate(filter: KeyValueFilter): KeyValueAggregateResult + policiesAggregate(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + policyEnforcementsAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult +} + +type TeamAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String +} + +input TeamFilter { + id: StringHashFilter + name: StringExactFilter + has: [TeamHasFilter] + and: [TeamFilter] + or: [TeamFilter] + not: TeamFilter +} + +enum TeamHasFilter { + id + name + roles + organization + applications + labels + policies + policyEnforcements +} + +input TeamOrder { + asc: TeamOrderable + desc: TeamOrderable + then: TeamOrder +} + +enum TeamOrderable { + id + name +} + +input TeamPatch { + name: String + roles: [RoleRef!] + organization: OrganizationRef + applications: [ApplicationRef!] + labels: [KeyValueRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] +} + +input TeamRef { + """id is randomly assigned""" + id: String + name: String + roles: [RoleRef!] + organization: OrganizationRef + applications: [ApplicationRef!] + labels: [KeyValueRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] +} + +type ToolsUsed { + id: ID! + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +type ToolsUsedAggregateResult { + count: Int + sourceMin: String + sourceMax: String + buildMin: String + buildMax: String + artifactMin: String + artifactMax: String + deployMin: String + deployMax: String + sbomMin: String + sbomMax: String +} + +input ToolsUsedFilter { + id: [ID!] 
+ has: [ToolsUsedHasFilter] + and: [ToolsUsedFilter] + or: [ToolsUsedFilter] + not: ToolsUsedFilter +} + +enum ToolsUsedHasFilter { + source + build + artifact + deploy + sbom + misc +} + +input ToolsUsedOrder { + asc: ToolsUsedOrderable + desc: ToolsUsedOrderable + then: ToolsUsedOrder +} + +enum ToolsUsedOrderable { + source + build + artifact + deploy + sbom +} + +input ToolsUsedPatch { + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +input ToolsUsedRef { + id: ID + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +input UpdateApplicationDeploymentInput { + filter: ApplicationDeploymentFilter! + set: ApplicationDeploymentPatch + remove: ApplicationDeploymentPatch +} + +type UpdateApplicationDeploymentPayload { + applicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + numUids: Int +} + +input UpdateApplicationDeploymentRiskInput { + filter: ApplicationDeploymentRiskFilter! + set: ApplicationDeploymentRiskPatch + remove: ApplicationDeploymentRiskPatch +} + +type UpdateApplicationDeploymentRiskPayload { + applicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + numUids: Int +} + +input UpdateApplicationEnvironmentInput { + filter: ApplicationEnvironmentFilter! + set: ApplicationEnvironmentPatch + remove: ApplicationEnvironmentPatch +} + +type UpdateApplicationEnvironmentPayload { + applicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + numUids: Int +} + +input UpdateApplicationInput { + filter: ApplicationFilter! 
+ set: ApplicationPatch + remove: ApplicationPatch +} + +type UpdateApplicationPayload { + application(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + numUids: Int +} + +input UpdateApplicationRiskStatusInput { + filter: ApplicationRiskStatusFilter! + set: ApplicationRiskStatusPatch + remove: ApplicationRiskStatusPatch +} + +type UpdateApplicationRiskStatusPayload { + applicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + numUids: Int +} + +input UpdateArtifactInput { + filter: ArtifactFilter! + set: ArtifactPatch + remove: ArtifactPatch +} + +type UpdateArtifactPayload { + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + numUids: Int +} + +input UpdateArtifactScanDataInput { + filter: ArtifactScanDataFilter! + set: ArtifactScanDataPatch + remove: ArtifactScanDataPatch +} + +type UpdateArtifactScanDataPayload { + artifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + numUids: Int +} + +input UpdateBuildToolInput { + filter: BuildToolFilter! + set: BuildToolPatch + remove: BuildToolPatch +} + +type UpdateBuildToolPayload { + buildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + numUids: Int +} + +input UpdateCommitMetaDataInput { + filter: CommitMetaDataFilter! + set: CommitMetaDataPatch + remove: CommitMetaDataPatch +} + +type UpdateCommitMetaDataPayload { + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + numUids: Int +} + +input UpdateComponentInput { + filter: ComponentFilter! 
+ set: ComponentPatch + remove: ComponentPatch +} + +type UpdateComponentPayload { + component(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + numUids: Int +} + +input UpdateCredentialsInput { + filter: CredentialsFilter! + set: CredentialsPatch + remove: CredentialsPatch +} + +type UpdateCredentialsPayload { + credentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + numUids: Int +} + +input UpdateCVSSInput { + filter: CVSSFilter! + set: CVSSPatch + remove: CVSSPatch +} + +type UpdateCVSSPayload { + cVSS(filter: CVSSFilter, order: CVSSOrder, first: Int, offset: Int): [CVSS] + numUids: Int +} + +input UpdateCWEInput { + filter: CWEFilter! + set: CWEPatch + remove: CWEPatch +} + +type UpdateCWEPayload { + cWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + numUids: Int +} + +input UpdateDeploymentTargetInput { + filter: DeploymentTargetFilter! + set: DeploymentTargetPatch + remove: DeploymentTargetPatch +} + +type UpdateDeploymentTargetPayload { + deploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + numUids: Int +} + +input UpdateEnvironmentInput { + filter: EnvironmentFilter! + set: EnvironmentPatch + remove: EnvironmentPatch +} + +type UpdateEnvironmentPayload { + environment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + numUids: Int +} + +input UpdateFeatureModeInput { + filter: FeatureModeFilter! + set: FeatureModePatch + remove: FeatureModePatch +} + +type UpdateFeatureModePayload { + featureMode(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode] + numUids: Int +} + +input UpdateIntegratorInput { + filter: IntegratorFilter! 
+ set: IntegratorPatch + remove: IntegratorPatch +} + +type UpdateIntegratorPayload { + integrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + numUids: Int +} + +input UpdateKeyValueInput { + filter: KeyValueFilter! + set: KeyValuePatch + remove: KeyValuePatch +} + +type UpdateKeyValuePayload { + keyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + numUids: Int +} + +input UpdateOrganizationInput { + filter: OrganizationFilter! + set: OrganizationPatch + remove: OrganizationPatch +} + +type UpdateOrganizationPayload { + organization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + numUids: Int +} + +input UpdatePolicyDefinitionInput { + filter: PolicyDefinitionFilter! + set: PolicyDefinitionPatch + remove: PolicyDefinitionPatch +} + +type UpdatePolicyDefinitionPayload { + policyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + numUids: Int +} + +input UpdatePolicyEnforcementInput { + filter: PolicyEnforcementFilter! + set: PolicyEnforcementPatch + remove: PolicyEnforcementPatch +} + +type UpdatePolicyEnforcementPayload { + policyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + numUids: Int +} + +input UpdateRBACInput { + filter: RBACFilter! + set: RBACPatch + remove: RBACPatch +} + +type UpdateRBACPayload { + rBAC(filter: RBACFilter, first: Int, offset: Int): [RBAC] + numUids: Int +} + +input UpdateRoleInput { + filter: RoleFilter! + set: RolePatch + remove: RolePatch +} + +type UpdateRolePayload { + role(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + numUids: Int +} + +input UpdateRunHistoryInput { + filter: RunHistoryFilter! 
+ set: RunHistoryPatch + remove: RunHistoryPatch +} + +type UpdateRunHistoryPayload { + runHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + numUids: Int +} + +input UpdateSchemaVersionInput { + filter: SchemaVersionFilter! + set: SchemaVersionPatch + remove: SchemaVersionPatch +} + +type UpdateSchemaVersionPayload { + schemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + numUids: Int +} + +input UpdateSourceCodeToolInput { + filter: SourceCodeToolFilter! + set: SourceCodeToolPatch + remove: SourceCodeToolPatch +} + +type UpdateSourceCodeToolPayload { + sourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + numUids: Int +} + +input UpdateTagInput { + filter: TagFilter! + set: TagPatch + remove: TagPatch +} + +type UpdateTagPayload { + tag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + numUids: Int +} + +input UpdateTeamInput { + filter: TeamFilter! + set: TeamPatch + remove: TeamPatch +} + +type UpdateTeamPayload { + team(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + numUids: Int +} + +input UpdateToolsUsedInput { + filter: ToolsUsedFilter! + set: ToolsUsedPatch + remove: ToolsUsedPatch +} + +type UpdateToolsUsedPayload { + toolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + numUids: Int +} + +input UpdateVulnerabilityInput { + filter: VulnerabilityFilter! + set: VulnerabilityPatch + remove: VulnerabilityPatch +} + +type UpdateVulnerabilityPayload { + vulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + numUids: Int +} + +type Vulnerability { + id: String! + parent: String! + ratings: Severity + cwes(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE!] 
+ summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss(filter: CVSSFilter, order: CVSSOrder, first: Int, offset: Int): [CVSS!] + affects(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component!] + cwesAggregate(filter: CWEFilter): CWEAggregateResult + cvssAggregate(filter: CVSSFilter): CVSSAggregateResult + affectsAggregate(filter: ComponentFilter): ComponentAggregateResult +} + +type VulnerabilityAggregateResult { + count: Int + idMin: String + idMax: String + parentMin: String + parentMax: String + summaryMin: String + summaryMax: String + detailMin: String + detailMax: String + recommendationMin: String + recommendationMax: String + publishedMin: DateTime + publishedMax: DateTime + modifiedMin: DateTime + modifiedMax: DateTime + createdAtMin: DateTime + createdAtMax: DateTime +} + +input VulnerabilityFilter { + id: StringHashFilter + parent: StringExactFilter + ratings: Severity_exact + createdAt: DateTimeFilter + has: [VulnerabilityHasFilter] + and: [VulnerabilityFilter] + or: [VulnerabilityFilter] + not: VulnerabilityFilter +} + +enum VulnerabilityHasFilter { + id + parent + ratings + cwes + summary + detail + recommendation + published + modified + createdAt + cvss + affects +} + +input VulnerabilityOrder { + asc: VulnerabilityOrderable + desc: VulnerabilityOrderable + then: VulnerabilityOrder +} + +enum VulnerabilityOrderable { + id + parent + summary + detail + recommendation + published + modified + createdAt +} + +input VulnerabilityPatch { + parent: String + ratings: Severity + cwes: [CWERef!] + summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: [CVSSRef!] + affects: [ComponentRef!] +} + +input VulnerabilityRef { + id: String + parent: String + ratings: Severity + cwes: [CWERef!] 
+ summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: [CVSSRef!] + affects: [ComponentRef!] +} + +input WithinFilter { + polygon: PolygonRef! +} + diff --git a/april2024june2024/de-duplication.go b/april2024june2024/de-duplication.go new file mode 100644 index 0000000..530006c --- /dev/null +++ b/april2024june2024/de-duplication.go @@ -0,0 +1,94 @@ +package april2024june2024 + +import ( + "context" + "fmt" + "upgradationScript/april2024june2024/april2024" + "upgradationScript/april2024june2024/june2024" + + "upgradationScript/logger" + + "github.com/Khan/genqlient/graphql" +) + +func performDeDeplicationTransition(prodDgraphClient, expDgraphClient graphql.Client) error { + + ctx := context.Background() + + prodRunhistoriesData, err := april2024.QueryRunHistory(ctx, prodDgraphClient) + if err != nil { + return fmt.Errorf("performDeDeplicationTransition: could'nt query old prodRunhistoriesData to initiate de-duplication transition error: %s", err.Error()) + } + + logger.Sl.Debugf("--------------Commencing de-duplication transition iterations to complete %d -----------------", len(prodRunhistoriesData.QueryRunHistory)) + + for iter, prodRunHistoryData := range prodRunhistoriesData.QueryRunHistory { + logger.Logger.Debug("---------------------------------------------") + logger.Sl.Debugf("De-Duplication Iteration %d to begin", iter) + + logger.Sl.Debugf("Check if security issue exists for alertTitle: %s alertMsg: %s suggestion: %s severity: %s errorMsg: %s action: %s", prodRunHistoryData.AlertTitle, prodRunHistoryData.AlertMessage, prodRunHistoryData.Suggestions, string(prodRunHistoryData.Severity), prodRunHistoryData.Error, prodRunHistoryData.Action) + checkIfSecurityIssuePresent, err := june2024.QuerySecurityIssue(ctx, expDgraphClient, prodRunHistoryData.AlertTitle, prodRunHistoryData.AlertMessage, prodRunHistoryData.Suggestions, june2024.Severity(prodRunHistoryData.Severity), 
prodRunHistoryData.Error, prodRunHistoryData.Action) + if err != nil { + return fmt.Errorf("performDeDeplicationTransition: could'nt check if security issue data existed error: %s", err.Error()) + } + + if checkIfSecurityIssuePresent == nil || len(checkIfSecurityIssuePresent.QuerySecurityIssue) == 0 { + logger.Logger.Debug("Security Issue of such metadata does not exist adding new") + ip := june2024.AddSecurityIssueInput{ + AlertTitle: prodRunHistoryData.AlertTitle, + AlertMessage: prodRunHistoryData.AlertMessage, + Suggestions: prodRunHistoryData.Suggestions, + Severity: june2024.Severity(prodRunHistoryData.Severity), + Action: prodRunHistoryData.Action, + Error: prodRunHistoryData.Error, + JiraUrl: prodRunHistoryData.JiraUrl, + Status: "active", + Reason: "", + CreatedAt: prodRunHistoryData.CreatedAt, + UpdatedAt: prodRunHistoryData.CreatedAt, + } + + addSecurityIssue, err := june2024.AddSecurityIssue(ctx, expDgraphClient, &ip) + if err != nil { + return fmt.Errorf("performDeDeplicationTransition: could not add security issue err: %s", err.Error()) + } + + checkIfSecurityIssuePresent.QuerySecurityIssue = append(checkIfSecurityIssuePresent.QuerySecurityIssue, &june2024.QuerySecurityIssueQuerySecurityIssue{}) + checkIfSecurityIssuePresent.QuerySecurityIssue[0].Id = addSecurityIssue.AddSecurityIssue.SecurityIssue[0].Id + checkIfSecurityIssuePresent.QuerySecurityIssue[0].CreatedAt = prodRunHistoryData.CreatedAt + checkIfSecurityIssuePresent.QuerySecurityIssue[0].UpdatedAt = prodRunHistoryData.CreatedAt + + logger.Logger.Debug("Security Issue of such metadata added") + } + + logger.Sl.Debugf("updating run history id: %s by attaching it with security issue id: %s", *prodRunHistoryData.Id, *checkIfSecurityIssuePresent.QuerySecurityIssue[0].Id) + if _, err := june2024.UpdateRunHistory(ctx, expDgraphClient, prodRunHistoryData.Id, checkIfSecurityIssuePresent.QuerySecurityIssue[0].Id); err != nil { + return fmt.Errorf("performDeDeplicationTransition: UpdateRunHistory error: 
%s", err.Error()) + } + logger.Sl.Debug("updated run history successfully") + + createdAt := checkIfSecurityIssuePresent.QuerySecurityIssue[0].CreatedAt + updatedAt := checkIfSecurityIssuePresent.QuerySecurityIssue[0].UpdatedAt + + if checkIfSecurityIssuePresent.QuerySecurityIssue[0].CreatedAt.After(*prodRunHistoryData.CreatedAt) { + createdAt = prodRunHistoryData.CreatedAt + } + + if checkIfSecurityIssuePresent.QuerySecurityIssue[0].UpdatedAt.Before(*prodRunHistoryData.CreatedAt) { + updatedAt = prodRunHistoryData.UpdatedAt + } + + logger.Sl.Debug("updating security issue id: %s with createdAt: %s updatedAt: %s", *checkIfSecurityIssuePresent.QuerySecurityIssue[0].Id, createdAt.String(), updatedAt.String()) + if _, err := june2024.UpdateSecurityIssue(ctx, expDgraphClient, checkIfSecurityIssuePresent.QuerySecurityIssue[0].Id, createdAt, updatedAt); err != nil { + return fmt.Errorf("performDeDeplicationTransition: UpdateSecurityIssue error: %s", err.Error()) + } + logger.Sl.Debug("updated security issue successfully") + + logger.Sl.Debugf("De-Duplication Iteration %d completed", iter) + logger.Logger.Debug("---------------------------------------------") + } + + logger.Logger.Info("------------De-duplication upgrade complete-------------------------") + + return nil +} diff --git a/april2024june2024/june2024/genqlient.yaml b/april2024june2024/june2024/genqlient.yaml new file mode 100644 index 0000000..36b3367 --- /dev/null +++ b/april2024june2024/june2024/genqlient.yaml @@ -0,0 +1,17 @@ +schema: schema.graphql +operations: +- queries.graphql +generated: schema-generated.go +package: june2024 +use_struct_references: true +bindings: + Boolean: + type: "*bool" + DateTime: + type: "*time.Time" + Int64: + type: int64 + Int: + type: "*int" + ID: + type: "*string" diff --git a/april2024june2024/june2024/queries.graphql b/april2024june2024/june2024/queries.graphql new file mode 100644 index 0000000..b292bd7 --- /dev/null +++ b/april2024june2024/june2024/queries.graphql @@ 
-0,0 +1,107 @@ +query QuerySecurityIssue( + $alertTitle: String! + $alertMsg: String! + $suggestion: String! + $severity: Severity! + $errorMsg: String! + $action: String! +) { + querySecurityIssue( + filter: { + AlertTitle: { eq: $alertTitle } + AlertMessage: { eq: $alertMsg } + Suggestions: { eq: $suggestion } + Severity: { eq: $severity } + Action: { eq: $action } + Error: { eq: $errorMsg } + } + ) { + id + CreatedAt + UpdatedAt + } +} + +mutation AddSecurityIssue($input: AddSecurityIssueInput!) { + addSecurityIssue(input: [$input]) { + securityIssue{ + id + } + } +} + +mutation UpdateSecurityIssue( + $securityIssue: ID! + $createdAt: DateTime + $updatedAt: DateTime +) { + updateSecurityIssue( + input: { + set: { CreatedAt: $createdAt, UpdatedAt: $updatedAt } + filter: { id: [$securityIssue] } + } + ) { + numUids + } +} + +mutation UpdateRunHistory($runHistoryId: ID!, $securityIssueId: ID!) { + updateRunHistory( + input: { + filter: { id: [$runHistoryId] } + set: { securityIssue: { id: $securityIssueId } } + } + ) { + numUids + } +} + +query AppEnvTools { + queryApplicationEnvironment { + id + deployments(order: { asc: deployedAt }) { + id + policyRunHistory(order: { asc: CreatedAt }) { + id + DatasourceTool + } + } + } +} + +mutation UpdateApplicationEnvironmentWithTools( + $id: String! + $tools: [String!]! +) { + updateApplicationEnvironment( + input: { filter: { id: { eq: $id } },set: { toolsUsed: $tools } } + ) { + numUids + } +} + +query QueryApplicationDeploymentWArtifact { + queryApplicationDeployment(order: { asc: deployedAt }) { + id + artifact { + id + artifactDeployment { + id + } + } + } +} + +mutation UpdateArtifactWDeploymentIds( + $artifactId: String! + $deploymentRefs: [ApplicationDeploymentRef!]! 
+) { + updateArtifact( + input: { + filter: { id: { eq: $artifactId } } + set: { artifactDeployment: $deploymentRefs } + } + ) { + numUids + } +} diff --git a/april2024june2024/june2024/schema-generated.go b/april2024june2024/june2024/schema-generated.go new file mode 100644 index 0000000..ad37986 --- /dev/null +++ b/april2024june2024/june2024/schema-generated.go @@ -0,0 +1,2033 @@ +// Code generated by github.com/Khan/genqlient, DO NOT EDIT. + +package june2024 + +import ( + "context" + "time" + + "github.com/Khan/genqlient/graphql" +) + +// AddSecurityIssueAddSecurityIssueAddSecurityIssuePayload includes the requested fields of the GraphQL type AddSecurityIssuePayload. +type AddSecurityIssueAddSecurityIssueAddSecurityIssuePayload struct { + SecurityIssue []*AddSecurityIssueAddSecurityIssueAddSecurityIssuePayloadSecurityIssue `json:"securityIssue"` +} + +// GetSecurityIssue returns AddSecurityIssueAddSecurityIssueAddSecurityIssuePayload.SecurityIssue, and is useful for accessing the field via an interface. +func (v *AddSecurityIssueAddSecurityIssueAddSecurityIssuePayload) GetSecurityIssue() []*AddSecurityIssueAddSecurityIssueAddSecurityIssuePayloadSecurityIssue { + return v.SecurityIssue +} + +// AddSecurityIssueAddSecurityIssueAddSecurityIssuePayloadSecurityIssue includes the requested fields of the GraphQL type SecurityIssue. +type AddSecurityIssueAddSecurityIssueAddSecurityIssuePayloadSecurityIssue struct { + Id *string `json:"id"` +} + +// GetId returns AddSecurityIssueAddSecurityIssueAddSecurityIssuePayloadSecurityIssue.Id, and is useful for accessing the field via an interface. 
+func (v *AddSecurityIssueAddSecurityIssueAddSecurityIssuePayloadSecurityIssue) GetId() *string { + return v.Id +} + +type AddSecurityIssueInput struct { + AlertTitle string `json:"AlertTitle"` + AlertMessage string `json:"AlertMessage"` + Suggestions string `json:"Suggestions"` + Severity Severity `json:"Severity"` + CreatedAt *time.Time `json:"CreatedAt"` + UpdatedAt *time.Time `json:"UpdatedAt"` + Action string `json:"Action"` + JiraUrl string `json:"JiraUrl"` + Status string `json:"Status"` + Reason string `json:"Reason"` + Error string `json:"Error"` + Affects []*RunHistoryRef `json:"Affects,omitempty"` +} + +// GetAlertTitle returns AddSecurityIssueInput.AlertTitle, and is useful for accessing the field via an interface. +func (v *AddSecurityIssueInput) GetAlertTitle() string { return v.AlertTitle } + +// GetAlertMessage returns AddSecurityIssueInput.AlertMessage, and is useful for accessing the field via an interface. +func (v *AddSecurityIssueInput) GetAlertMessage() string { return v.AlertMessage } + +// GetSuggestions returns AddSecurityIssueInput.Suggestions, and is useful for accessing the field via an interface. +func (v *AddSecurityIssueInput) GetSuggestions() string { return v.Suggestions } + +// GetSeverity returns AddSecurityIssueInput.Severity, and is useful for accessing the field via an interface. +func (v *AddSecurityIssueInput) GetSeverity() Severity { return v.Severity } + +// GetCreatedAt returns AddSecurityIssueInput.CreatedAt, and is useful for accessing the field via an interface. +func (v *AddSecurityIssueInput) GetCreatedAt() *time.Time { return v.CreatedAt } + +// GetUpdatedAt returns AddSecurityIssueInput.UpdatedAt, and is useful for accessing the field via an interface. +func (v *AddSecurityIssueInput) GetUpdatedAt() *time.Time { return v.UpdatedAt } + +// GetAction returns AddSecurityIssueInput.Action, and is useful for accessing the field via an interface. 
+func (v *AddSecurityIssueInput) GetAction() string { return v.Action } + +// GetJiraUrl returns AddSecurityIssueInput.JiraUrl, and is useful for accessing the field via an interface. +func (v *AddSecurityIssueInput) GetJiraUrl() string { return v.JiraUrl } + +// GetStatus returns AddSecurityIssueInput.Status, and is useful for accessing the field via an interface. +func (v *AddSecurityIssueInput) GetStatus() string { return v.Status } + +// GetReason returns AddSecurityIssueInput.Reason, and is useful for accessing the field via an interface. +func (v *AddSecurityIssueInput) GetReason() string { return v.Reason } + +// GetError returns AddSecurityIssueInput.Error, and is useful for accessing the field via an interface. +func (v *AddSecurityIssueInput) GetError() string { return v.Error } + +// GetAffects returns AddSecurityIssueInput.Affects, and is useful for accessing the field via an interface. +func (v *AddSecurityIssueInput) GetAffects() []*RunHistoryRef { return v.Affects } + +// AddSecurityIssueResponse is returned by AddSecurityIssue on success. +type AddSecurityIssueResponse struct { + AddSecurityIssue *AddSecurityIssueAddSecurityIssueAddSecurityIssuePayload `json:"addSecurityIssue"` +} + +// GetAddSecurityIssue returns AddSecurityIssueResponse.AddSecurityIssue, and is useful for accessing the field via an interface. +func (v *AddSecurityIssueResponse) GetAddSecurityIssue() *AddSecurityIssueAddSecurityIssueAddSecurityIssuePayload { + return v.AddSecurityIssue +} + +// AppEnvToolsQueryApplicationEnvironment includes the requested fields of the GraphQL type ApplicationEnvironment. +// The GraphQL type's documentation follows. +// +// ApplicationEnvironment is a running instance of an application down to the level of a namespace or its non k8s equivalent. 
+type AppEnvToolsQueryApplicationEnvironment struct { + // id is randomly assigned + Id string `json:"id"` + Deployments []*AppEnvToolsQueryApplicationEnvironmentDeploymentsApplicationDeployment `json:"deployments"` +} + +// GetId returns AppEnvToolsQueryApplicationEnvironment.Id, and is useful for accessing the field via an interface. +func (v *AppEnvToolsQueryApplicationEnvironment) GetId() string { return v.Id } + +// GetDeployments returns AppEnvToolsQueryApplicationEnvironment.Deployments, and is useful for accessing the field via an interface. +func (v *AppEnvToolsQueryApplicationEnvironment) GetDeployments() []*AppEnvToolsQueryApplicationEnvironmentDeploymentsApplicationDeployment { + return v.Deployments +} + +// AppEnvToolsQueryApplicationEnvironmentDeploymentsApplicationDeployment includes the requested fields of the GraphQL type ApplicationDeployment. +// The GraphQL type's documentation follows. +// +// ApplicationDeployment tells us about the the artifact deployed along with its associated details. +type AppEnvToolsQueryApplicationEnvironmentDeploymentsApplicationDeployment struct { + // id is randomly assigned + Id string `json:"id"` + // policyRunHistory is the policy execution history for this deployment + PolicyRunHistory []*AppEnvToolsQueryApplicationEnvironmentDeploymentsApplicationDeploymentPolicyRunHistory `json:"policyRunHistory"` +} + +// GetId returns AppEnvToolsQueryApplicationEnvironmentDeploymentsApplicationDeployment.Id, and is useful for accessing the field via an interface. +func (v *AppEnvToolsQueryApplicationEnvironmentDeploymentsApplicationDeployment) GetId() string { + return v.Id +} + +// GetPolicyRunHistory returns AppEnvToolsQueryApplicationEnvironmentDeploymentsApplicationDeployment.PolicyRunHistory, and is useful for accessing the field via an interface. 
+func (v *AppEnvToolsQueryApplicationEnvironmentDeploymentsApplicationDeployment) GetPolicyRunHistory() []*AppEnvToolsQueryApplicationEnvironmentDeploymentsApplicationDeploymentPolicyRunHistory { + return v.PolicyRunHistory +} + +// AppEnvToolsQueryApplicationEnvironmentDeploymentsApplicationDeploymentPolicyRunHistory includes the requested fields of the GraphQL type RunHistory. +type AppEnvToolsQueryApplicationEnvironmentDeploymentsApplicationDeploymentPolicyRunHistory struct { + Id *string `json:"id"` + DatasourceTool string `json:"DatasourceTool"` +} + +// GetId returns AppEnvToolsQueryApplicationEnvironmentDeploymentsApplicationDeploymentPolicyRunHistory.Id, and is useful for accessing the field via an interface. +func (v *AppEnvToolsQueryApplicationEnvironmentDeploymentsApplicationDeploymentPolicyRunHistory) GetId() *string { + return v.Id +} + +// GetDatasourceTool returns AppEnvToolsQueryApplicationEnvironmentDeploymentsApplicationDeploymentPolicyRunHistory.DatasourceTool, and is useful for accessing the field via an interface. +func (v *AppEnvToolsQueryApplicationEnvironmentDeploymentsApplicationDeploymentPolicyRunHistory) GetDatasourceTool() string { + return v.DatasourceTool +} + +// AppEnvToolsResponse is returned by AppEnvTools on success. +type AppEnvToolsResponse struct { + QueryApplicationEnvironment []*AppEnvToolsQueryApplicationEnvironment `json:"queryApplicationEnvironment"` +} + +// GetQueryApplicationEnvironment returns AppEnvToolsResponse.QueryApplicationEnvironment, and is useful for accessing the field via an interface. 
+func (v *AppEnvToolsResponse) GetQueryApplicationEnvironment() []*AppEnvToolsQueryApplicationEnvironment { + return v.QueryApplicationEnvironment +} + +type ApplicationDeploymentRef struct { + // id is randomly assigned + Id string `json:"id"` + Artifact []*ArtifactRef `json:"artifact,omitempty"` + ApplicationEnvironment *ApplicationEnvironmentRef `json:"applicationEnvironment,omitempty"` + DeployedAt *time.Time `json:"deployedAt"` + // deploymentStage is an enum and can be discovered, current, previous or blocked + DeploymentStage DeploymentStage `json:"deploymentStage"` + // source is argo, spinnaker etc + Source string `json:"source"` + // component would be a service + Component string `json:"component"` + // user who deployed the artifact + DeployedBy string `json:"deployedBy"` + ToolsUsed *ToolsUsedRef `json:"toolsUsed,omitempty"` + DeploymentRisk *ApplicationDeploymentRiskRef `json:"deploymentRisk,omitempty"` + PolicyRunHistory []*RunHistoryRef `json:"policyRunHistory,omitempty"` +} + +// GetId returns ApplicationDeploymentRef.Id, and is useful for accessing the field via an interface. +func (v *ApplicationDeploymentRef) GetId() string { return v.Id } + +// GetArtifact returns ApplicationDeploymentRef.Artifact, and is useful for accessing the field via an interface. +func (v *ApplicationDeploymentRef) GetArtifact() []*ArtifactRef { return v.Artifact } + +// GetApplicationEnvironment returns ApplicationDeploymentRef.ApplicationEnvironment, and is useful for accessing the field via an interface. +func (v *ApplicationDeploymentRef) GetApplicationEnvironment() *ApplicationEnvironmentRef { + return v.ApplicationEnvironment +} + +// GetDeployedAt returns ApplicationDeploymentRef.DeployedAt, and is useful for accessing the field via an interface. +func (v *ApplicationDeploymentRef) GetDeployedAt() *time.Time { return v.DeployedAt } + +// GetDeploymentStage returns ApplicationDeploymentRef.DeploymentStage, and is useful for accessing the field via an interface. 
+func (v *ApplicationDeploymentRef) GetDeploymentStage() DeploymentStage { return v.DeploymentStage } + +// GetSource returns ApplicationDeploymentRef.Source, and is useful for accessing the field via an interface. +func (v *ApplicationDeploymentRef) GetSource() string { return v.Source } + +// GetComponent returns ApplicationDeploymentRef.Component, and is useful for accessing the field via an interface. +func (v *ApplicationDeploymentRef) GetComponent() string { return v.Component } + +// GetDeployedBy returns ApplicationDeploymentRef.DeployedBy, and is useful for accessing the field via an interface. +func (v *ApplicationDeploymentRef) GetDeployedBy() string { return v.DeployedBy } + +// GetToolsUsed returns ApplicationDeploymentRef.ToolsUsed, and is useful for accessing the field via an interface. +func (v *ApplicationDeploymentRef) GetToolsUsed() *ToolsUsedRef { return v.ToolsUsed } + +// GetDeploymentRisk returns ApplicationDeploymentRef.DeploymentRisk, and is useful for accessing the field via an interface. +func (v *ApplicationDeploymentRef) GetDeploymentRisk() *ApplicationDeploymentRiskRef { + return v.DeploymentRisk +} + +// GetPolicyRunHistory returns ApplicationDeploymentRef.PolicyRunHistory, and is useful for accessing the field via an interface. +func (v *ApplicationDeploymentRef) GetPolicyRunHistory() []*RunHistoryRef { return v.PolicyRunHistory } + +type ApplicationDeploymentRiskRef struct { + Id *string `json:"id"` + SourceCodeAlertsScore *int `json:"sourceCodeAlertsScore"` + BuildAlertsScore *int `json:"buildAlertsScore"` + ArtifactAlertsScore *int `json:"artifactAlertsScore"` + DeploymentAlertsScore *int `json:"deploymentAlertsScore"` + DeploymentRiskStatus RiskStatus `json:"deploymentRiskStatus"` + ApplicationDeployment *ApplicationDeploymentRef `json:"applicationDeployment,omitempty"` +} + +// GetId returns ApplicationDeploymentRiskRef.Id, and is useful for accessing the field via an interface. 
+func (v *ApplicationDeploymentRiskRef) GetId() *string { return v.Id } + +// GetSourceCodeAlertsScore returns ApplicationDeploymentRiskRef.SourceCodeAlertsScore, and is useful for accessing the field via an interface. +func (v *ApplicationDeploymentRiskRef) GetSourceCodeAlertsScore() *int { + return v.SourceCodeAlertsScore +} + +// GetBuildAlertsScore returns ApplicationDeploymentRiskRef.BuildAlertsScore, and is useful for accessing the field via an interface. +func (v *ApplicationDeploymentRiskRef) GetBuildAlertsScore() *int { return v.BuildAlertsScore } + +// GetArtifactAlertsScore returns ApplicationDeploymentRiskRef.ArtifactAlertsScore, and is useful for accessing the field via an interface. +func (v *ApplicationDeploymentRiskRef) GetArtifactAlertsScore() *int { return v.ArtifactAlertsScore } + +// GetDeploymentAlertsScore returns ApplicationDeploymentRiskRef.DeploymentAlertsScore, and is useful for accessing the field via an interface. +func (v *ApplicationDeploymentRiskRef) GetDeploymentAlertsScore() *int { + return v.DeploymentAlertsScore +} + +// GetDeploymentRiskStatus returns ApplicationDeploymentRiskRef.DeploymentRiskStatus, and is useful for accessing the field via an interface. +func (v *ApplicationDeploymentRiskRef) GetDeploymentRiskStatus() RiskStatus { + return v.DeploymentRiskStatus +} + +// GetApplicationDeployment returns ApplicationDeploymentRiskRef.ApplicationDeployment, and is useful for accessing the field via an interface. 
+func (v *ApplicationDeploymentRiskRef) GetApplicationDeployment() *ApplicationDeploymentRef { + return v.ApplicationDeployment +} + +type ApplicationEnvironmentRef struct { + // id is randomly assigned + Id string `json:"id"` + Environment *EnvironmentRef `json:"environment,omitempty"` + Application *ApplicationRef `json:"application,omitempty"` + DeploymentTarget *DeploymentTargetRef `json:"deploymentTarget,omitempty"` + Namespace string `json:"namespace"` + // toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + ToolsUsed []string `json:"toolsUsed"` + Deployments []*ApplicationDeploymentRef `json:"deployments,omitempty"` + RiskStatus *ApplicationRiskStatusRef `json:"riskStatus,omitempty"` + Metadata []*KeyValueRef `json:"metadata,omitempty"` +} + +// GetId returns ApplicationEnvironmentRef.Id, and is useful for accessing the field via an interface. +func (v *ApplicationEnvironmentRef) GetId() string { return v.Id } + +// GetEnvironment returns ApplicationEnvironmentRef.Environment, and is useful for accessing the field via an interface. +func (v *ApplicationEnvironmentRef) GetEnvironment() *EnvironmentRef { return v.Environment } + +// GetApplication returns ApplicationEnvironmentRef.Application, and is useful for accessing the field via an interface. +func (v *ApplicationEnvironmentRef) GetApplication() *ApplicationRef { return v.Application } + +// GetDeploymentTarget returns ApplicationEnvironmentRef.DeploymentTarget, and is useful for accessing the field via an interface. +func (v *ApplicationEnvironmentRef) GetDeploymentTarget() *DeploymentTargetRef { + return v.DeploymentTarget +} + +// GetNamespace returns ApplicationEnvironmentRef.Namespace, and is useful for accessing the field via an interface. 
+func (v *ApplicationEnvironmentRef) GetNamespace() string { return v.Namespace } + +// GetToolsUsed returns ApplicationEnvironmentRef.ToolsUsed, and is useful for accessing the field via an interface. +func (v *ApplicationEnvironmentRef) GetToolsUsed() []string { return v.ToolsUsed } + +// GetDeployments returns ApplicationEnvironmentRef.Deployments, and is useful for accessing the field via an interface. +func (v *ApplicationEnvironmentRef) GetDeployments() []*ApplicationDeploymentRef { + return v.Deployments +} + +// GetRiskStatus returns ApplicationEnvironmentRef.RiskStatus, and is useful for accessing the field via an interface. +func (v *ApplicationEnvironmentRef) GetRiskStatus() *ApplicationRiskStatusRef { return v.RiskStatus } + +// GetMetadata returns ApplicationEnvironmentRef.Metadata, and is useful for accessing the field via an interface. +func (v *ApplicationEnvironmentRef) GetMetadata() []*KeyValueRef { return v.Metadata } + +type ApplicationRef struct { + // id is randomly assigned + Id string `json:"id"` + Name string `json:"name"` + Roles []*RoleRef `json:"roles,omitempty"` + Environments []*ApplicationEnvironmentRef `json:"environments,omitempty"` + Team *TeamRef `json:"team,omitempty"` + Policies []*PolicyDefinitionRef `json:"policies,omitempty"` + PolicyEnforcements []*PolicyEnforcementRef `json:"policyEnforcements,omitempty"` + Metadata []*KeyValueRef `json:"metadata,omitempty"` +} + +// GetId returns ApplicationRef.Id, and is useful for accessing the field via an interface. +func (v *ApplicationRef) GetId() string { return v.Id } + +// GetName returns ApplicationRef.Name, and is useful for accessing the field via an interface. +func (v *ApplicationRef) GetName() string { return v.Name } + +// GetRoles returns ApplicationRef.Roles, and is useful for accessing the field via an interface. 
+func (v *ApplicationRef) GetRoles() []*RoleRef { return v.Roles } + +// GetEnvironments returns ApplicationRef.Environments, and is useful for accessing the field via an interface. +func (v *ApplicationRef) GetEnvironments() []*ApplicationEnvironmentRef { return v.Environments } + +// GetTeam returns ApplicationRef.Team, and is useful for accessing the field via an interface. +func (v *ApplicationRef) GetTeam() *TeamRef { return v.Team } + +// GetPolicies returns ApplicationRef.Policies, and is useful for accessing the field via an interface. +func (v *ApplicationRef) GetPolicies() []*PolicyDefinitionRef { return v.Policies } + +// GetPolicyEnforcements returns ApplicationRef.PolicyEnforcements, and is useful for accessing the field via an interface. +func (v *ApplicationRef) GetPolicyEnforcements() []*PolicyEnforcementRef { return v.PolicyEnforcements } + +// GetMetadata returns ApplicationRef.Metadata, and is useful for accessing the field via an interface. +func (v *ApplicationRef) GetMetadata() []*KeyValueRef { return v.Metadata } + +type ApplicationRiskStatusRef struct { + Id *string `json:"id"` + RiskStatus RiskStatus `json:"riskStatus"` + SourceCodeAlerts *int `json:"sourceCodeAlerts"` + BuildAlerts *int `json:"buildAlerts"` + ArtifactAlerts *int `json:"artifactAlerts"` + DeploymentAlerts *int `json:"deploymentAlerts"` + CreatedAt *time.Time `json:"createdAt"` + UpdatedAt *time.Time `json:"updatedAt"` + ApplicationEnvironment *ApplicationEnvironmentRef `json:"applicationEnvironment,omitempty"` +} + +// GetId returns ApplicationRiskStatusRef.Id, and is useful for accessing the field via an interface. +func (v *ApplicationRiskStatusRef) GetId() *string { return v.Id } + +// GetRiskStatus returns ApplicationRiskStatusRef.RiskStatus, and is useful for accessing the field via an interface. 
+func (v *ApplicationRiskStatusRef) GetRiskStatus() RiskStatus { return v.RiskStatus } + +// GetSourceCodeAlerts returns ApplicationRiskStatusRef.SourceCodeAlerts, and is useful for accessing the field via an interface. +func (v *ApplicationRiskStatusRef) GetSourceCodeAlerts() *int { return v.SourceCodeAlerts } + +// GetBuildAlerts returns ApplicationRiskStatusRef.BuildAlerts, and is useful for accessing the field via an interface. +func (v *ApplicationRiskStatusRef) GetBuildAlerts() *int { return v.BuildAlerts } + +// GetArtifactAlerts returns ApplicationRiskStatusRef.ArtifactAlerts, and is useful for accessing the field via an interface. +func (v *ApplicationRiskStatusRef) GetArtifactAlerts() *int { return v.ArtifactAlerts } + +// GetDeploymentAlerts returns ApplicationRiskStatusRef.DeploymentAlerts, and is useful for accessing the field via an interface. +func (v *ApplicationRiskStatusRef) GetDeploymentAlerts() *int { return v.DeploymentAlerts } + +// GetCreatedAt returns ApplicationRiskStatusRef.CreatedAt, and is useful for accessing the field via an interface. +func (v *ApplicationRiskStatusRef) GetCreatedAt() *time.Time { return v.CreatedAt } + +// GetUpdatedAt returns ApplicationRiskStatusRef.UpdatedAt, and is useful for accessing the field via an interface. +func (v *ApplicationRiskStatusRef) GetUpdatedAt() *time.Time { return v.UpdatedAt } + +// GetApplicationEnvironment returns ApplicationRiskStatusRef.ApplicationEnvironment, and is useful for accessing the field via an interface. 
+func (v *ApplicationRiskStatusRef) GetApplicationEnvironment() *ApplicationEnvironmentRef { + return v.ApplicationEnvironment +} + +type ArtifactRef struct { + Id string `json:"id"` + ArtifactType string `json:"artifactType"` + ArtifactName string `json:"artifactName"` + ArtifactTag string `json:"artifactTag"` + ArtifactSha string `json:"artifactSha"` + ScanData []*ArtifactScanDataRef `json:"scanData,omitempty"` + ArtifactDeployment []*ApplicationDeploymentRef `json:"artifactDeployment,omitempty"` + BuildDetails *BuildToolRef `json:"buildDetails,omitempty"` +} + +// GetId returns ArtifactRef.Id, and is useful for accessing the field via an interface. +func (v *ArtifactRef) GetId() string { return v.Id } + +// GetArtifactType returns ArtifactRef.ArtifactType, and is useful for accessing the field via an interface. +func (v *ArtifactRef) GetArtifactType() string { return v.ArtifactType } + +// GetArtifactName returns ArtifactRef.ArtifactName, and is useful for accessing the field via an interface. +func (v *ArtifactRef) GetArtifactName() string { return v.ArtifactName } + +// GetArtifactTag returns ArtifactRef.ArtifactTag, and is useful for accessing the field via an interface. +func (v *ArtifactRef) GetArtifactTag() string { return v.ArtifactTag } + +// GetArtifactSha returns ArtifactRef.ArtifactSha, and is useful for accessing the field via an interface. +func (v *ArtifactRef) GetArtifactSha() string { return v.ArtifactSha } + +// GetScanData returns ArtifactRef.ScanData, and is useful for accessing the field via an interface. +func (v *ArtifactRef) GetScanData() []*ArtifactScanDataRef { return v.ScanData } + +// GetArtifactDeployment returns ArtifactRef.ArtifactDeployment, and is useful for accessing the field via an interface. +func (v *ArtifactRef) GetArtifactDeployment() []*ApplicationDeploymentRef { + return v.ArtifactDeployment +} + +// GetBuildDetails returns ArtifactRef.BuildDetails, and is useful for accessing the field via an interface. 
+func (v *ArtifactRef) GetBuildDetails() *BuildToolRef { return v.BuildDetails } + +type ArtifactScanDataRef struct { + Id string `json:"id"` + ArtifactSha string `json:"artifactSha"` + Tool string `json:"tool"` + ArtifactDetails *ArtifactRef `json:"artifactDetails,omitempty"` + LastScannedAt *time.Time `json:"lastScannedAt"` + CreatedAt *time.Time `json:"createdAt"` + VulnTrackingId string `json:"vulnTrackingId"` + Components []*ComponentRef `json:"components,omitempty"` + VulnCriticalCount *int `json:"vulnCriticalCount"` + VulnHighCount *int `json:"vulnHighCount"` + VulnMediumCount *int `json:"vulnMediumCount"` + VulnLowCount *int `json:"vulnLowCount"` + VulnInfoCount *int `json:"vulnInfoCount"` + VulnUnknownCount *int `json:"vulnUnknownCount"` + VulnNoneCount *int `json:"vulnNoneCount"` + VulnTotalCount *int `json:"vulnTotalCount"` + SbomUrl string `json:"sbomUrl"` + ArtifactLicenseScanUrl string `json:"artifactLicenseScanUrl"` + ArtifactSecretScanUrl string `json:"artifactSecretScanUrl"` + SourceLicenseScanUrl string `json:"sourceLicenseScanUrl"` + SourceSecretScanUrl string `json:"sourceSecretScanUrl"` + SourceScorecardScanUrl string `json:"sourceScorecardScanUrl"` + SourceSemgrepHighSeverityScanUrl string `json:"sourceSemgrepHighSeverityScanUrl"` + SourceSemgrepMediumSeverityScanUrl string `json:"sourceSemgrepMediumSeverityScanUrl"` + SourceSemgrepLowSeverityScanUrl string `json:"sourceSemgrepLowSeverityScanUrl"` + SourceSnykScanUrl string `json:"sourceSnykScanUrl"` + VirusTotalUrlScan string `json:"virusTotalUrlScan"` + RiskStatus RiskStatus `json:"riskStatus"` + ArtifactRunHistory []*RunHistoryRef `json:"artifactRunHistory,omitempty"` +} + +// GetId returns ArtifactScanDataRef.Id, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetId() string { return v.Id } + +// GetArtifactSha returns ArtifactScanDataRef.ArtifactSha, and is useful for accessing the field via an interface. 
+func (v *ArtifactScanDataRef) GetArtifactSha() string { return v.ArtifactSha } + +// GetTool returns ArtifactScanDataRef.Tool, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetTool() string { return v.Tool } + +// GetArtifactDetails returns ArtifactScanDataRef.ArtifactDetails, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetArtifactDetails() *ArtifactRef { return v.ArtifactDetails } + +// GetLastScannedAt returns ArtifactScanDataRef.LastScannedAt, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetLastScannedAt() *time.Time { return v.LastScannedAt } + +// GetCreatedAt returns ArtifactScanDataRef.CreatedAt, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetCreatedAt() *time.Time { return v.CreatedAt } + +// GetVulnTrackingId returns ArtifactScanDataRef.VulnTrackingId, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetVulnTrackingId() string { return v.VulnTrackingId } + +// GetComponents returns ArtifactScanDataRef.Components, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetComponents() []*ComponentRef { return v.Components } + +// GetVulnCriticalCount returns ArtifactScanDataRef.VulnCriticalCount, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetVulnCriticalCount() *int { return v.VulnCriticalCount } + +// GetVulnHighCount returns ArtifactScanDataRef.VulnHighCount, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetVulnHighCount() *int { return v.VulnHighCount } + +// GetVulnMediumCount returns ArtifactScanDataRef.VulnMediumCount, and is useful for accessing the field via an interface. 
+func (v *ArtifactScanDataRef) GetVulnMediumCount() *int { return v.VulnMediumCount } + +// GetVulnLowCount returns ArtifactScanDataRef.VulnLowCount, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetVulnLowCount() *int { return v.VulnLowCount } + +// GetVulnInfoCount returns ArtifactScanDataRef.VulnInfoCount, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetVulnInfoCount() *int { return v.VulnInfoCount } + +// GetVulnUnknownCount returns ArtifactScanDataRef.VulnUnknownCount, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetVulnUnknownCount() *int { return v.VulnUnknownCount } + +// GetVulnNoneCount returns ArtifactScanDataRef.VulnNoneCount, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetVulnNoneCount() *int { return v.VulnNoneCount } + +// GetVulnTotalCount returns ArtifactScanDataRef.VulnTotalCount, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetVulnTotalCount() *int { return v.VulnTotalCount } + +// GetSbomUrl returns ArtifactScanDataRef.SbomUrl, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetSbomUrl() string { return v.SbomUrl } + +// GetArtifactLicenseScanUrl returns ArtifactScanDataRef.ArtifactLicenseScanUrl, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetArtifactLicenseScanUrl() string { return v.ArtifactLicenseScanUrl } + +// GetArtifactSecretScanUrl returns ArtifactScanDataRef.ArtifactSecretScanUrl, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetArtifactSecretScanUrl() string { return v.ArtifactSecretScanUrl } + +// GetSourceLicenseScanUrl returns ArtifactScanDataRef.SourceLicenseScanUrl, and is useful for accessing the field via an interface. 
+func (v *ArtifactScanDataRef) GetSourceLicenseScanUrl() string { return v.SourceLicenseScanUrl } + +// GetSourceSecretScanUrl returns ArtifactScanDataRef.SourceSecretScanUrl, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetSourceSecretScanUrl() string { return v.SourceSecretScanUrl } + +// GetSourceScorecardScanUrl returns ArtifactScanDataRef.SourceScorecardScanUrl, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetSourceScorecardScanUrl() string { return v.SourceScorecardScanUrl } + +// GetSourceSemgrepHighSeverityScanUrl returns ArtifactScanDataRef.SourceSemgrepHighSeverityScanUrl, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetSourceSemgrepHighSeverityScanUrl() string { + return v.SourceSemgrepHighSeverityScanUrl +} + +// GetSourceSemgrepMediumSeverityScanUrl returns ArtifactScanDataRef.SourceSemgrepMediumSeverityScanUrl, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetSourceSemgrepMediumSeverityScanUrl() string { + return v.SourceSemgrepMediumSeverityScanUrl +} + +// GetSourceSemgrepLowSeverityScanUrl returns ArtifactScanDataRef.SourceSemgrepLowSeverityScanUrl, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetSourceSemgrepLowSeverityScanUrl() string { + return v.SourceSemgrepLowSeverityScanUrl +} + +// GetSourceSnykScanUrl returns ArtifactScanDataRef.SourceSnykScanUrl, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetSourceSnykScanUrl() string { return v.SourceSnykScanUrl } + +// GetVirusTotalUrlScan returns ArtifactScanDataRef.VirusTotalUrlScan, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetVirusTotalUrlScan() string { return v.VirusTotalUrlScan } + +// GetRiskStatus returns ArtifactScanDataRef.RiskStatus, and is useful for accessing the field via an interface. 
+func (v *ArtifactScanDataRef) GetRiskStatus() RiskStatus { return v.RiskStatus } + +// GetArtifactRunHistory returns ArtifactScanDataRef.ArtifactRunHistory, and is useful for accessing the field via an interface. +func (v *ArtifactScanDataRef) GetArtifactRunHistory() []*RunHistoryRef { return v.ArtifactRunHistory } + +type BuildToolRef struct { + // id is randomly assigned + Id string `json:"id"` + // buildId is a unique job id, run id for a job/pipeline/action + BuildId string `json:"buildId"` + // tool is jenkins etc + Tool string `json:"tool"` + // buildName is the name of the job/pipeline/action + BuildName string `json:"buildName"` + BuildUrl string `json:"buildUrl"` + ArtifactType string `json:"artifactType"` + // artifact would be something like nginx without the tag + Artifact string `json:"artifact"` + // artifactTag would be the tag of the artifact + ArtifactTag string `json:"artifactTag"` + // digest is the sha of the artifact + Digest string `json:"digest"` + // buildDigest is the sha of the artifact as sent from the build tool + BuildDigest string `json:"buildDigest"` + ArtifactNode *ArtifactRef `json:"artifactNode,omitempty"` + // buildTime is the time at which the artifact was built + BuildTime *time.Time `json:"buildTime"` + // buildUser is the user that built the artifact + BuildUser string `json:"buildUser"` + SourceCodeTool *SourceCodeToolRef `json:"sourceCodeTool,omitempty"` + CommitMetaData []*CommitMetaDataRef `json:"commitMetaData,omitempty"` + CreatedAt *time.Time `json:"createdAt"` +} + +// GetId returns BuildToolRef.Id, and is useful for accessing the field via an interface. +func (v *BuildToolRef) GetId() string { return v.Id } + +// GetBuildId returns BuildToolRef.BuildId, and is useful for accessing the field via an interface. +func (v *BuildToolRef) GetBuildId() string { return v.BuildId } + +// GetTool returns BuildToolRef.Tool, and is useful for accessing the field via an interface. 
+func (v *BuildToolRef) GetTool() string { return v.Tool } + +// GetBuildName returns BuildToolRef.BuildName, and is useful for accessing the field via an interface. +func (v *BuildToolRef) GetBuildName() string { return v.BuildName } + +// GetBuildUrl returns BuildToolRef.BuildUrl, and is useful for accessing the field via an interface. +func (v *BuildToolRef) GetBuildUrl() string { return v.BuildUrl } + +// GetArtifactType returns BuildToolRef.ArtifactType, and is useful for accessing the field via an interface. +func (v *BuildToolRef) GetArtifactType() string { return v.ArtifactType } + +// GetArtifact returns BuildToolRef.Artifact, and is useful for accessing the field via an interface. +func (v *BuildToolRef) GetArtifact() string { return v.Artifact } + +// GetArtifactTag returns BuildToolRef.ArtifactTag, and is useful for accessing the field via an interface. +func (v *BuildToolRef) GetArtifactTag() string { return v.ArtifactTag } + +// GetDigest returns BuildToolRef.Digest, and is useful for accessing the field via an interface. +func (v *BuildToolRef) GetDigest() string { return v.Digest } + +// GetBuildDigest returns BuildToolRef.BuildDigest, and is useful for accessing the field via an interface. +func (v *BuildToolRef) GetBuildDigest() string { return v.BuildDigest } + +// GetArtifactNode returns BuildToolRef.ArtifactNode, and is useful for accessing the field via an interface. +func (v *BuildToolRef) GetArtifactNode() *ArtifactRef { return v.ArtifactNode } + +// GetBuildTime returns BuildToolRef.BuildTime, and is useful for accessing the field via an interface. +func (v *BuildToolRef) GetBuildTime() *time.Time { return v.BuildTime } + +// GetBuildUser returns BuildToolRef.BuildUser, and is useful for accessing the field via an interface. +func (v *BuildToolRef) GetBuildUser() string { return v.BuildUser } + +// GetSourceCodeTool returns BuildToolRef.SourceCodeTool, and is useful for accessing the field via an interface. 
+func (v *BuildToolRef) GetSourceCodeTool() *SourceCodeToolRef { return v.SourceCodeTool } + +// GetCommitMetaData returns BuildToolRef.CommitMetaData, and is useful for accessing the field via an interface. +func (v *BuildToolRef) GetCommitMetaData() []*CommitMetaDataRef { return v.CommitMetaData } + +// GetCreatedAt returns BuildToolRef.CreatedAt, and is useful for accessing the field via an interface. +func (v *BuildToolRef) GetCreatedAt() *time.Time { return v.CreatedAt } + +type CWERef struct { + Id string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` +} + +// GetId returns CWERef.Id, and is useful for accessing the field via an interface. +func (v *CWERef) GetId() string { return v.Id } + +// GetName returns CWERef.Name, and is useful for accessing the field via an interface. +func (v *CWERef) GetName() string { return v.Name } + +// GetDescription returns CWERef.Description, and is useful for accessing the field via an interface. +func (v *CWERef) GetDescription() string { return v.Description } + +type CommitMetaDataRef struct { + // id is randomly assigned + Id *string `json:"id"` + // commit is a git commit that was used to build an artifact + Commit string `json:"commit"` + Repository string `json:"repository"` + // commitSign tells us whether the commit is signed + CommitSign *bool `json:"commitSign"` + NoOfReviewersConf *int `json:"noOfReviewersConf"` + ReviewerList []string `json:"reviewerList"` + ApproverList []string `json:"approverList"` + BuildTool *BuildToolRef `json:"buildTool,omitempty"` +} + +// GetId returns CommitMetaDataRef.Id, and is useful for accessing the field via an interface. +func (v *CommitMetaDataRef) GetId() *string { return v.Id } + +// GetCommit returns CommitMetaDataRef.Commit, and is useful for accessing the field via an interface. 
+func (v *CommitMetaDataRef) GetCommit() string { return v.Commit } + +// GetRepository returns CommitMetaDataRef.Repository, and is useful for accessing the field via an interface. +func (v *CommitMetaDataRef) GetRepository() string { return v.Repository } + +// GetCommitSign returns CommitMetaDataRef.CommitSign, and is useful for accessing the field via an interface. +func (v *CommitMetaDataRef) GetCommitSign() *bool { return v.CommitSign } + +// GetNoOfReviewersConf returns CommitMetaDataRef.NoOfReviewersConf, and is useful for accessing the field via an interface. +func (v *CommitMetaDataRef) GetNoOfReviewersConf() *int { return v.NoOfReviewersConf } + +// GetReviewerList returns CommitMetaDataRef.ReviewerList, and is useful for accessing the field via an interface. +func (v *CommitMetaDataRef) GetReviewerList() []string { return v.ReviewerList } + +// GetApproverList returns CommitMetaDataRef.ApproverList, and is useful for accessing the field via an interface. +func (v *CommitMetaDataRef) GetApproverList() []string { return v.ApproverList } + +// GetBuildTool returns CommitMetaDataRef.BuildTool, and is useful for accessing the field via an interface. +func (v *CommitMetaDataRef) GetBuildTool() *BuildToolRef { return v.BuildTool } + +type ComponentRef struct { + Id string `json:"id"` + Type string `json:"type"` + Name string `json:"name"` + Version string `json:"version"` + Licenses []string `json:"licenses"` + Purl string `json:"purl"` + Cpe string `json:"cpe"` + ScannedAt *time.Time `json:"scannedAt"` + Vulnerabilities []*VulnerabilityRef `json:"vulnerabilities,omitempty"` + Artifacts []*ArtifactScanDataRef `json:"artifacts,omitempty"` +} + +// GetId returns ComponentRef.Id, and is useful for accessing the field via an interface. +func (v *ComponentRef) GetId() string { return v.Id } + +// GetType returns ComponentRef.Type, and is useful for accessing the field via an interface. 
+func (v *ComponentRef) GetType() string { return v.Type } + +// GetName returns ComponentRef.Name, and is useful for accessing the field via an interface. +func (v *ComponentRef) GetName() string { return v.Name } + +// GetVersion returns ComponentRef.Version, and is useful for accessing the field via an interface. +func (v *ComponentRef) GetVersion() string { return v.Version } + +// GetLicenses returns ComponentRef.Licenses, and is useful for accessing the field via an interface. +func (v *ComponentRef) GetLicenses() []string { return v.Licenses } + +// GetPurl returns ComponentRef.Purl, and is useful for accessing the field via an interface. +func (v *ComponentRef) GetPurl() string { return v.Purl } + +// GetCpe returns ComponentRef.Cpe, and is useful for accessing the field via an interface. +func (v *ComponentRef) GetCpe() string { return v.Cpe } + +// GetScannedAt returns ComponentRef.ScannedAt, and is useful for accessing the field via an interface. +func (v *ComponentRef) GetScannedAt() *time.Time { return v.ScannedAt } + +// GetVulnerabilities returns ComponentRef.Vulnerabilities, and is useful for accessing the field via an interface. +func (v *ComponentRef) GetVulnerabilities() []*VulnerabilityRef { return v.Vulnerabilities } + +// GetArtifacts returns ComponentRef.Artifacts, and is useful for accessing the field via an interface. +func (v *ComponentRef) GetArtifacts() []*ArtifactScanDataRef { return v.Artifacts } + +type CredentialsRef struct { + Id *string `json:"id"` + Data string `json:"data"` + Integrator *IntegratorRef `json:"integrator,omitempty"` +} + +// GetId returns CredentialsRef.Id, and is useful for accessing the field via an interface. +func (v *CredentialsRef) GetId() *string { return v.Id } + +// GetData returns CredentialsRef.Data, and is useful for accessing the field via an interface. 
+func (v *CredentialsRef) GetData() string { return v.Data }
+
+// GetIntegrator returns CredentialsRef.Integrator, and is useful for accessing the field via an interface.
+func (v *CredentialsRef) GetIntegrator() *IntegratorRef { return v.Integrator }
+
+// DeploymentStage is an enum denoting the stage of the deployment.
+type DeploymentStage string
+
+const (
+	// deployment is discovered from the events
+	DeploymentStageDiscovered DeploymentStage = "discovered"
+	// scanning is under process
+	DeploymentStageScanning DeploymentStage = "scanning"
+	// deployment is known to have passed the deployment firewall and the deployment(ie the artifact) is live
+	DeploymentStageCurrent DeploymentStage = "current"
+	// deployment becomes a past deployment because another fresh deployment has happened
+	DeploymentStagePrevious DeploymentStage = "previous"
+	// deployment is blocked by the firewall
+	DeploymentStageBlocked DeploymentStage = "blocked"
+)
+
+type DeploymentTargetRef struct {
+	// id is randomly assigned
+	Id   string `json:"id"`
+	Name string `json:"name"`
+	// this would be the ip/server address of the target environment
+	Ip      string `json:"ip"`
+	Account string `json:"account"`
+	// this would be something like aws, gcp etc
+	TargetType string `json:"targetType"`
+	// this would be something like us-east-1 etc
+	Region                    string           `json:"region"`
+	KubescapeServiceConnected string           `json:"kubescapeServiceConnected"`
+	IsFirewall                *bool            `json:"isFirewall"`
+	Organization              *OrganizationRef `json:"organization,omitempty"`
+	DefaultEnvironment        *EnvironmentRef  `json:"defaultEnvironment,omitempty"`
+}
+
+// GetId returns DeploymentTargetRef.Id, and is useful for accessing the field via an interface.
+func (v *DeploymentTargetRef) GetId() string { return v.Id }
+
+// GetName returns DeploymentTargetRef.Name, and is useful for accessing the field via an interface.
+func (v *DeploymentTargetRef) GetName() string { return v.Name } + +// GetIp returns DeploymentTargetRef.Ip, and is useful for accessing the field via an interface. +func (v *DeploymentTargetRef) GetIp() string { return v.Ip } + +// GetAccount returns DeploymentTargetRef.Account, and is useful for accessing the field via an interface. +func (v *DeploymentTargetRef) GetAccount() string { return v.Account } + +// GetTargetType returns DeploymentTargetRef.TargetType, and is useful for accessing the field via an interface. +func (v *DeploymentTargetRef) GetTargetType() string { return v.TargetType } + +// GetRegion returns DeploymentTargetRef.Region, and is useful for accessing the field via an interface. +func (v *DeploymentTargetRef) GetRegion() string { return v.Region } + +// GetKubescapeServiceConnected returns DeploymentTargetRef.KubescapeServiceConnected, and is useful for accessing the field via an interface. +func (v *DeploymentTargetRef) GetKubescapeServiceConnected() string { + return v.KubescapeServiceConnected +} + +// GetIsFirewall returns DeploymentTargetRef.IsFirewall, and is useful for accessing the field via an interface. +func (v *DeploymentTargetRef) GetIsFirewall() *bool { return v.IsFirewall } + +// GetOrganization returns DeploymentTargetRef.Organization, and is useful for accessing the field via an interface. +func (v *DeploymentTargetRef) GetOrganization() *OrganizationRef { return v.Organization } + +// GetDefaultEnvironment returns DeploymentTargetRef.DefaultEnvironment, and is useful for accessing the field via an interface. +func (v *DeploymentTargetRef) GetDefaultEnvironment() *EnvironmentRef { return v.DefaultEnvironment } + +type EnvironmentRef struct { + Id string `json:"id"` + Organization *OrganizationRef `json:"organization,omitempty"` + Purpose string `json:"purpose"` +} + +// GetId returns EnvironmentRef.Id, and is useful for accessing the field via an interface. 
+func (v *EnvironmentRef) GetId() string { return v.Id } + +// GetOrganization returns EnvironmentRef.Organization, and is useful for accessing the field via an interface. +func (v *EnvironmentRef) GetOrganization() *OrganizationRef { return v.Organization } + +// GetPurpose returns EnvironmentRef.Purpose, and is useful for accessing the field via an interface. +func (v *EnvironmentRef) GetPurpose() string { return v.Purpose } + +type FeatureModeRef struct { + Id string `json:"id"` + Organization *OrganizationRef `json:"organization,omitempty"` + Scan string `json:"scan"` + Type string `json:"type"` + Enabled *bool `json:"enabled"` + Category string `json:"category"` + CreatedAt *time.Time `json:"createdAt"` + UpdatedAt *time.Time `json:"updatedAt"` +} + +// GetId returns FeatureModeRef.Id, and is useful for accessing the field via an interface. +func (v *FeatureModeRef) GetId() string { return v.Id } + +// GetOrganization returns FeatureModeRef.Organization, and is useful for accessing the field via an interface. +func (v *FeatureModeRef) GetOrganization() *OrganizationRef { return v.Organization } + +// GetScan returns FeatureModeRef.Scan, and is useful for accessing the field via an interface. +func (v *FeatureModeRef) GetScan() string { return v.Scan } + +// GetType returns FeatureModeRef.Type, and is useful for accessing the field via an interface. +func (v *FeatureModeRef) GetType() string { return v.Type } + +// GetEnabled returns FeatureModeRef.Enabled, and is useful for accessing the field via an interface. +func (v *FeatureModeRef) GetEnabled() *bool { return v.Enabled } + +// GetCategory returns FeatureModeRef.Category, and is useful for accessing the field via an interface. +func (v *FeatureModeRef) GetCategory() string { return v.Category } + +// GetCreatedAt returns FeatureModeRef.CreatedAt, and is useful for accessing the field via an interface. 
+func (v *FeatureModeRef) GetCreatedAt() *time.Time { return v.CreatedAt } + +// GetUpdatedAt returns FeatureModeRef.UpdatedAt, and is useful for accessing the field via an interface. +func (v *FeatureModeRef) GetUpdatedAt() *time.Time { return v.UpdatedAt } + +type IntegratorRef struct { + Id string `json:"id"` + Organization *OrganizationRef `json:"organization,omitempty"` + Name string `json:"name"` + Type string `json:"type"` + Category string `json:"category"` + Credentials *CredentialsRef `json:"credentials,omitempty"` + CreatedAt *time.Time `json:"createdAt"` + UpdatedAt *time.Time `json:"updatedAt"` +} + +// GetId returns IntegratorRef.Id, and is useful for accessing the field via an interface. +func (v *IntegratorRef) GetId() string { return v.Id } + +// GetOrganization returns IntegratorRef.Organization, and is useful for accessing the field via an interface. +func (v *IntegratorRef) GetOrganization() *OrganizationRef { return v.Organization } + +// GetName returns IntegratorRef.Name, and is useful for accessing the field via an interface. +func (v *IntegratorRef) GetName() string { return v.Name } + +// GetType returns IntegratorRef.Type, and is useful for accessing the field via an interface. +func (v *IntegratorRef) GetType() string { return v.Type } + +// GetCategory returns IntegratorRef.Category, and is useful for accessing the field via an interface. +func (v *IntegratorRef) GetCategory() string { return v.Category } + +// GetCredentials returns IntegratorRef.Credentials, and is useful for accessing the field via an interface. +func (v *IntegratorRef) GetCredentials() *CredentialsRef { return v.Credentials } + +// GetCreatedAt returns IntegratorRef.CreatedAt, and is useful for accessing the field via an interface. +func (v *IntegratorRef) GetCreatedAt() *time.Time { return v.CreatedAt } + +// GetUpdatedAt returns IntegratorRef.UpdatedAt, and is useful for accessing the field via an interface. 
+func (v *IntegratorRef) GetUpdatedAt() *time.Time { return v.UpdatedAt } + +type KeyValueRef struct { + Id string `json:"id"` + Name string `json:"name"` + Value string `json:"value"` +} + +// GetId returns KeyValueRef.Id, and is useful for accessing the field via an interface. +func (v *KeyValueRef) GetId() string { return v.Id } + +// GetName returns KeyValueRef.Name, and is useful for accessing the field via an interface. +func (v *KeyValueRef) GetName() string { return v.Name } + +// GetValue returns KeyValueRef.Value, and is useful for accessing the field via an interface. +func (v *KeyValueRef) GetValue() string { return v.Value } + +type OrganizationRef struct { + // id is randomly assigned + Id string `json:"id"` + Name string `json:"name"` + Roles []*RoleRef `json:"roles,omitempty"` + Teams []*TeamRef `json:"teams,omitempty"` + Environments []*DeploymentTargetRef `json:"environments,omitempty"` + Policies []*PolicyDefinitionRef `json:"policies,omitempty"` + PolicyEnforcements []*PolicyEnforcementRef `json:"policyEnforcements,omitempty"` + Integrators []*IntegratorRef `json:"integrators,omitempty"` + FeatureModes []*FeatureModeRef `json:"featureModes,omitempty"` +} + +// GetId returns OrganizationRef.Id, and is useful for accessing the field via an interface. +func (v *OrganizationRef) GetId() string { return v.Id } + +// GetName returns OrganizationRef.Name, and is useful for accessing the field via an interface. +func (v *OrganizationRef) GetName() string { return v.Name } + +// GetRoles returns OrganizationRef.Roles, and is useful for accessing the field via an interface. +func (v *OrganizationRef) GetRoles() []*RoleRef { return v.Roles } + +// GetTeams returns OrganizationRef.Teams, and is useful for accessing the field via an interface. +func (v *OrganizationRef) GetTeams() []*TeamRef { return v.Teams } + +// GetEnvironments returns OrganizationRef.Environments, and is useful for accessing the field via an interface. 
+func (v *OrganizationRef) GetEnvironments() []*DeploymentTargetRef { return v.Environments } + +// GetPolicies returns OrganizationRef.Policies, and is useful for accessing the field via an interface. +func (v *OrganizationRef) GetPolicies() []*PolicyDefinitionRef { return v.Policies } + +// GetPolicyEnforcements returns OrganizationRef.PolicyEnforcements, and is useful for accessing the field via an interface. +func (v *OrganizationRef) GetPolicyEnforcements() []*PolicyEnforcementRef { + return v.PolicyEnforcements +} + +// GetIntegrators returns OrganizationRef.Integrators, and is useful for accessing the field via an interface. +func (v *OrganizationRef) GetIntegrators() []*IntegratorRef { return v.Integrators } + +// GetFeatureModes returns OrganizationRef.FeatureModes, and is useful for accessing the field via an interface. +func (v *OrganizationRef) GetFeatureModes() []*FeatureModeRef { return v.FeatureModes } + +type PolicyDefinitionRef struct { + Id string `json:"id"` + OwnerOrg *OrganizationRef `json:"ownerOrg,omitempty"` + OwnerTeam *TeamRef `json:"ownerTeam,omitempty"` + OwnerApplication *ApplicationRef `json:"ownerApplication,omitempty"` + CreatedAt *time.Time `json:"createdAt"` + UpdatedAt *time.Time `json:"updatedAt"` + PolicyName string `json:"policyName"` + Category string `json:"category"` + Stage string `json:"stage"` + Description string `json:"description"` + ScheduledPolicy *bool `json:"scheduledPolicy"` + Script string `json:"script"` + Variables string `json:"variables"` + ConditionName string `json:"conditionName"` + Suggestion string `json:"suggestion"` +} + +// GetId returns PolicyDefinitionRef.Id, and is useful for accessing the field via an interface. +func (v *PolicyDefinitionRef) GetId() string { return v.Id } + +// GetOwnerOrg returns PolicyDefinitionRef.OwnerOrg, and is useful for accessing the field via an interface. 
+func (v *PolicyDefinitionRef) GetOwnerOrg() *OrganizationRef { return v.OwnerOrg } + +// GetOwnerTeam returns PolicyDefinitionRef.OwnerTeam, and is useful for accessing the field via an interface. +func (v *PolicyDefinitionRef) GetOwnerTeam() *TeamRef { return v.OwnerTeam } + +// GetOwnerApplication returns PolicyDefinitionRef.OwnerApplication, and is useful for accessing the field via an interface. +func (v *PolicyDefinitionRef) GetOwnerApplication() *ApplicationRef { return v.OwnerApplication } + +// GetCreatedAt returns PolicyDefinitionRef.CreatedAt, and is useful for accessing the field via an interface. +func (v *PolicyDefinitionRef) GetCreatedAt() *time.Time { return v.CreatedAt } + +// GetUpdatedAt returns PolicyDefinitionRef.UpdatedAt, and is useful for accessing the field via an interface. +func (v *PolicyDefinitionRef) GetUpdatedAt() *time.Time { return v.UpdatedAt } + +// GetPolicyName returns PolicyDefinitionRef.PolicyName, and is useful for accessing the field via an interface. +func (v *PolicyDefinitionRef) GetPolicyName() string { return v.PolicyName } + +// GetCategory returns PolicyDefinitionRef.Category, and is useful for accessing the field via an interface. +func (v *PolicyDefinitionRef) GetCategory() string { return v.Category } + +// GetStage returns PolicyDefinitionRef.Stage, and is useful for accessing the field via an interface. +func (v *PolicyDefinitionRef) GetStage() string { return v.Stage } + +// GetDescription returns PolicyDefinitionRef.Description, and is useful for accessing the field via an interface. +func (v *PolicyDefinitionRef) GetDescription() string { return v.Description } + +// GetScheduledPolicy returns PolicyDefinitionRef.ScheduledPolicy, and is useful for accessing the field via an interface. +func (v *PolicyDefinitionRef) GetScheduledPolicy() *bool { return v.ScheduledPolicy } + +// GetScript returns PolicyDefinitionRef.Script, and is useful for accessing the field via an interface. 
+func (v *PolicyDefinitionRef) GetScript() string { return v.Script } + +// GetVariables returns PolicyDefinitionRef.Variables, and is useful for accessing the field via an interface. +func (v *PolicyDefinitionRef) GetVariables() string { return v.Variables } + +// GetConditionName returns PolicyDefinitionRef.ConditionName, and is useful for accessing the field via an interface. +func (v *PolicyDefinitionRef) GetConditionName() string { return v.ConditionName } + +// GetSuggestion returns PolicyDefinitionRef.Suggestion, and is useful for accessing the field via an interface. +func (v *PolicyDefinitionRef) GetSuggestion() string { return v.Suggestion } + +type PolicyEnforcementRef struct { + Id *string `json:"id"` + Policy *PolicyDefinitionRef `json:"policy,omitempty"` + EnforcedOrg *OrganizationRef `json:"enforcedOrg,omitempty"` + EnforcedTeam *TeamRef `json:"enforcedTeam,omitempty"` + EnforcedApplication *ApplicationRef `json:"enforcedApplication,omitempty"` + Status *bool `json:"status"` + ForceApply *bool `json:"forceApply"` + Severity Severity `json:"severity"` + DatasourceTool string `json:"datasourceTool"` + Action string `json:"action"` + ConditionValue string `json:"conditionValue"` + Environments []*EnvironmentRef `json:"environments,omitempty"` + Tags []*TagRef `json:"tags,omitempty"` + CreatedAt *time.Time `json:"createdAt"` + UpdatedAt *time.Time `json:"updatedAt"` +} + +// GetId returns PolicyEnforcementRef.Id, and is useful for accessing the field via an interface. +func (v *PolicyEnforcementRef) GetId() *string { return v.Id } + +// GetPolicy returns PolicyEnforcementRef.Policy, and is useful for accessing the field via an interface. +func (v *PolicyEnforcementRef) GetPolicy() *PolicyDefinitionRef { return v.Policy } + +// GetEnforcedOrg returns PolicyEnforcementRef.EnforcedOrg, and is useful for accessing the field via an interface. 
+func (v *PolicyEnforcementRef) GetEnforcedOrg() *OrganizationRef { return v.EnforcedOrg } + +// GetEnforcedTeam returns PolicyEnforcementRef.EnforcedTeam, and is useful for accessing the field via an interface. +func (v *PolicyEnforcementRef) GetEnforcedTeam() *TeamRef { return v.EnforcedTeam } + +// GetEnforcedApplication returns PolicyEnforcementRef.EnforcedApplication, and is useful for accessing the field via an interface. +func (v *PolicyEnforcementRef) GetEnforcedApplication() *ApplicationRef { return v.EnforcedApplication } + +// GetStatus returns PolicyEnforcementRef.Status, and is useful for accessing the field via an interface. +func (v *PolicyEnforcementRef) GetStatus() *bool { return v.Status } + +// GetForceApply returns PolicyEnforcementRef.ForceApply, and is useful for accessing the field via an interface. +func (v *PolicyEnforcementRef) GetForceApply() *bool { return v.ForceApply } + +// GetSeverity returns PolicyEnforcementRef.Severity, and is useful for accessing the field via an interface. +func (v *PolicyEnforcementRef) GetSeverity() Severity { return v.Severity } + +// GetDatasourceTool returns PolicyEnforcementRef.DatasourceTool, and is useful for accessing the field via an interface. +func (v *PolicyEnforcementRef) GetDatasourceTool() string { return v.DatasourceTool } + +// GetAction returns PolicyEnforcementRef.Action, and is useful for accessing the field via an interface. +func (v *PolicyEnforcementRef) GetAction() string { return v.Action } + +// GetConditionValue returns PolicyEnforcementRef.ConditionValue, and is useful for accessing the field via an interface. +func (v *PolicyEnforcementRef) GetConditionValue() string { return v.ConditionValue } + +// GetEnvironments returns PolicyEnforcementRef.Environments, and is useful for accessing the field via an interface. 
+func (v *PolicyEnforcementRef) GetEnvironments() []*EnvironmentRef { return v.Environments }
+
+// GetTags returns PolicyEnforcementRef.Tags, and is useful for accessing the field via an interface.
+func (v *PolicyEnforcementRef) GetTags() []*TagRef { return v.Tags }
+
+// GetCreatedAt returns PolicyEnforcementRef.CreatedAt, and is useful for accessing the field via an interface.
+func (v *PolicyEnforcementRef) GetCreatedAt() *time.Time { return v.CreatedAt }
+
+// GetUpdatedAt returns PolicyEnforcementRef.UpdatedAt, and is useful for accessing the field via an interface.
+func (v *PolicyEnforcementRef) GetUpdatedAt() *time.Time { return v.UpdatedAt }
+
+// QueryApplicationDeploymentWArtifactQueryApplicationDeployment includes the requested fields of the GraphQL type ApplicationDeployment.
+// The GraphQL type's documentation follows.
+//
+// ApplicationDeployment tells us about the artifact deployed along with its associated details.
+type QueryApplicationDeploymentWArtifactQueryApplicationDeployment struct {
+	// id is randomly assigned
+	Id string `json:"id"`
+	// artifact that is deployed
+	Artifact []*QueryApplicationDeploymentWArtifactQueryApplicationDeploymentArtifact `json:"artifact"`
+}
+
+// GetId returns QueryApplicationDeploymentWArtifactQueryApplicationDeployment.Id, and is useful for accessing the field via an interface.
+func (v *QueryApplicationDeploymentWArtifactQueryApplicationDeployment) GetId() string { return v.Id }
+
+// GetArtifact returns QueryApplicationDeploymentWArtifactQueryApplicationDeployment.Artifact, and is useful for accessing the field via an interface.
+func (v *QueryApplicationDeploymentWArtifactQueryApplicationDeployment) GetArtifact() []*QueryApplicationDeploymentWArtifactQueryApplicationDeploymentArtifact {
+	return v.Artifact
+}
+
+// QueryApplicationDeploymentWArtifactQueryApplicationDeploymentArtifact includes the requested fields of the GraphQL type Artifact.
+type QueryApplicationDeploymentWArtifactQueryApplicationDeploymentArtifact struct {
+	Id                 string                                                                                                           `json:"id"`
+	ArtifactDeployment []*QueryApplicationDeploymentWArtifactQueryApplicationDeploymentArtifactArtifactDeploymentApplicationDeployment `json:"artifactDeployment"`
+}
+
+// GetId returns QueryApplicationDeploymentWArtifactQueryApplicationDeploymentArtifact.Id, and is useful for accessing the field via an interface.
+func (v *QueryApplicationDeploymentWArtifactQueryApplicationDeploymentArtifact) GetId() string {
+	return v.Id
+}
+
+// GetArtifactDeployment returns QueryApplicationDeploymentWArtifactQueryApplicationDeploymentArtifact.ArtifactDeployment, and is useful for accessing the field via an interface.
+func (v *QueryApplicationDeploymentWArtifactQueryApplicationDeploymentArtifact) GetArtifactDeployment() []*QueryApplicationDeploymentWArtifactQueryApplicationDeploymentArtifactArtifactDeploymentApplicationDeployment {
+	return v.ArtifactDeployment
+}
+
+// QueryApplicationDeploymentWArtifactQueryApplicationDeploymentArtifactArtifactDeploymentApplicationDeployment includes the requested fields of the GraphQL type ApplicationDeployment.
+// The GraphQL type's documentation follows.
+//
+// ApplicationDeployment tells us about the artifact deployed along with its associated details.
+type QueryApplicationDeploymentWArtifactQueryApplicationDeploymentArtifactArtifactDeploymentApplicationDeployment struct {
+	// id is randomly assigned
+	Id string `json:"id"`
+}
+
+// GetId returns QueryApplicationDeploymentWArtifactQueryApplicationDeploymentArtifactArtifactDeploymentApplicationDeployment.Id, and is useful for accessing the field via an interface.
+func (v *QueryApplicationDeploymentWArtifactQueryApplicationDeploymentArtifactArtifactDeploymentApplicationDeployment) GetId() string {
+	return v.Id
+}
+
+// QueryApplicationDeploymentWArtifactResponse is returned by QueryApplicationDeploymentWArtifact on success.
+type QueryApplicationDeploymentWArtifactResponse struct { + QueryApplicationDeployment []*QueryApplicationDeploymentWArtifactQueryApplicationDeployment `json:"queryApplicationDeployment"` +} + +// GetQueryApplicationDeployment returns QueryApplicationDeploymentWArtifactResponse.QueryApplicationDeployment, and is useful for accessing the field via an interface. +func (v *QueryApplicationDeploymentWArtifactResponse) GetQueryApplicationDeployment() []*QueryApplicationDeploymentWArtifactQueryApplicationDeployment { + return v.QueryApplicationDeployment +} + +// QuerySecurityIssueQuerySecurityIssue includes the requested fields of the GraphQL type SecurityIssue. +type QuerySecurityIssueQuerySecurityIssue struct { + Id *string `json:"id"` + CreatedAt *time.Time `json:"CreatedAt"` + UpdatedAt *time.Time `json:"UpdatedAt"` +} + +// GetId returns QuerySecurityIssueQuerySecurityIssue.Id, and is useful for accessing the field via an interface. +func (v *QuerySecurityIssueQuerySecurityIssue) GetId() *string { return v.Id } + +// GetCreatedAt returns QuerySecurityIssueQuerySecurityIssue.CreatedAt, and is useful for accessing the field via an interface. +func (v *QuerySecurityIssueQuerySecurityIssue) GetCreatedAt() *time.Time { return v.CreatedAt } + +// GetUpdatedAt returns QuerySecurityIssueQuerySecurityIssue.UpdatedAt, and is useful for accessing the field via an interface. +func (v *QuerySecurityIssueQuerySecurityIssue) GetUpdatedAt() *time.Time { return v.UpdatedAt } + +// QuerySecurityIssueResponse is returned by QuerySecurityIssue on success. +type QuerySecurityIssueResponse struct { + QuerySecurityIssue []*QuerySecurityIssueQuerySecurityIssue `json:"querySecurityIssue"` +} + +// GetQuerySecurityIssue returns QuerySecurityIssueResponse.QuerySecurityIssue, and is useful for accessing the field via an interface. 
+func (v *QuerySecurityIssueResponse) GetQuerySecurityIssue() []*QuerySecurityIssueQuerySecurityIssue { + return v.QuerySecurityIssue +} + +// RiskStatus tells us what risk a current application instance or a deployment is at. +type RiskStatus string + +const ( + RiskStatusLowrisk RiskStatus = "lowrisk" + RiskStatusMediumrisk RiskStatus = "mediumrisk" + RiskStatusHighrisk RiskStatus = "highrisk" + RiskStatusApocalypserisk RiskStatus = "apocalypserisk" + RiskStatusScanning RiskStatus = "scanning" +) + +type RolePermission string + +const ( + RolePermissionAdmin RolePermission = "admin" + RolePermissionWrite RolePermission = "write" + RolePermissionRead RolePermission = "read" +) + +type RoleRef struct { + // id is randomly assigned + Id string `json:"id"` + // group should be a URI format that includes a scope or realm + Group string `json:"group"` + Permission RolePermission `json:"permission"` +} + +// GetId returns RoleRef.Id, and is useful for accessing the field via an interface. +func (v *RoleRef) GetId() string { return v.Id } + +// GetGroup returns RoleRef.Group, and is useful for accessing the field via an interface. +func (v *RoleRef) GetGroup() string { return v.Group } + +// GetPermission returns RoleRef.Permission, and is useful for accessing the field via an interface. 
+func (v *RoleRef) GetPermission() RolePermission { return v.Permission } + +type RunHistoryRef struct { + Id *string `json:"id"` + PolicyId string `json:"policyId"` + ApplicationDeployment *ApplicationDeploymentRef `json:"applicationDeployment,omitempty"` + ArtifactScan *ArtifactScanDataRef `json:"artifactScan,omitempty"` + PolicyName string `json:"PolicyName"` + Stage string `json:"Stage"` + Artifact string `json:"Artifact"` + ArtifactTag string `json:"ArtifactTag"` + ArtifactSha string `json:"ArtifactSha"` + ArtifactNameTag string `json:"ArtifactNameTag"` + DatasourceTool string `json:"DatasourceTool"` + CreatedAt *time.Time `json:"CreatedAt"` + UpdatedAt *time.Time `json:"UpdatedAt"` + DeployedAt *time.Time `json:"DeployedAt"` + Hash string `json:"Hash"` + Pass *bool `json:"Pass"` + MetaData string `json:"MetaData"` + FileApi string `json:"FileApi"` + ScheduledPolicy *bool `json:"scheduledPolicy"` + PolicyEnforcements *PolicyEnforcementRef `json:"policyEnforcements,omitempty"` + SecurityIssue *SecurityIssueRef `json:"securityIssue,omitempty"` +} + +// GetId returns RunHistoryRef.Id, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetId() *string { return v.Id } + +// GetPolicyId returns RunHistoryRef.PolicyId, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetPolicyId() string { return v.PolicyId } + +// GetApplicationDeployment returns RunHistoryRef.ApplicationDeployment, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetApplicationDeployment() *ApplicationDeploymentRef { + return v.ApplicationDeployment +} + +// GetArtifactScan returns RunHistoryRef.ArtifactScan, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetArtifactScan() *ArtifactScanDataRef { return v.ArtifactScan } + +// GetPolicyName returns RunHistoryRef.PolicyName, and is useful for accessing the field via an interface. 
+func (v *RunHistoryRef) GetPolicyName() string { return v.PolicyName } + +// GetStage returns RunHistoryRef.Stage, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetStage() string { return v.Stage } + +// GetArtifact returns RunHistoryRef.Artifact, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetArtifact() string { return v.Artifact } + +// GetArtifactTag returns RunHistoryRef.ArtifactTag, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetArtifactTag() string { return v.ArtifactTag } + +// GetArtifactSha returns RunHistoryRef.ArtifactSha, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetArtifactSha() string { return v.ArtifactSha } + +// GetArtifactNameTag returns RunHistoryRef.ArtifactNameTag, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetArtifactNameTag() string { return v.ArtifactNameTag } + +// GetDatasourceTool returns RunHistoryRef.DatasourceTool, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetDatasourceTool() string { return v.DatasourceTool } + +// GetCreatedAt returns RunHistoryRef.CreatedAt, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetCreatedAt() *time.Time { return v.CreatedAt } + +// GetUpdatedAt returns RunHistoryRef.UpdatedAt, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetUpdatedAt() *time.Time { return v.UpdatedAt } + +// GetDeployedAt returns RunHistoryRef.DeployedAt, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetDeployedAt() *time.Time { return v.DeployedAt } + +// GetHash returns RunHistoryRef.Hash, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetHash() string { return v.Hash } + +// GetPass returns RunHistoryRef.Pass, and is useful for accessing the field via an interface. 
+func (v *RunHistoryRef) GetPass() *bool { return v.Pass } + +// GetMetaData returns RunHistoryRef.MetaData, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetMetaData() string { return v.MetaData } + +// GetFileApi returns RunHistoryRef.FileApi, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetFileApi() string { return v.FileApi } + +// GetScheduledPolicy returns RunHistoryRef.ScheduledPolicy, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetScheduledPolicy() *bool { return v.ScheduledPolicy } + +// GetPolicyEnforcements returns RunHistoryRef.PolicyEnforcements, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetPolicyEnforcements() *PolicyEnforcementRef { return v.PolicyEnforcements } + +// GetSecurityIssue returns RunHistoryRef.SecurityIssue, and is useful for accessing the field via an interface. +func (v *RunHistoryRef) GetSecurityIssue() *SecurityIssueRef { return v.SecurityIssue } + +type SecurityIssueRef struct { + Id *string `json:"id"` + AlertTitle string `json:"AlertTitle"` + AlertMessage string `json:"AlertMessage"` + Suggestions string `json:"Suggestions"` + Severity Severity `json:"Severity"` + CreatedAt *time.Time `json:"CreatedAt"` + UpdatedAt *time.Time `json:"UpdatedAt"` + Action string `json:"Action"` + JiraUrl string `json:"JiraUrl"` + Status string `json:"Status"` + Reason string `json:"Reason"` + Error string `json:"Error"` + Affects []*RunHistoryRef `json:"Affects,omitempty"` +} + +// GetId returns SecurityIssueRef.Id, and is useful for accessing the field via an interface. +func (v *SecurityIssueRef) GetId() *string { return v.Id } + +// GetAlertTitle returns SecurityIssueRef.AlertTitle, and is useful for accessing the field via an interface. 
+func (v *SecurityIssueRef) GetAlertTitle() string { return v.AlertTitle } + +// GetAlertMessage returns SecurityIssueRef.AlertMessage, and is useful for accessing the field via an interface. +func (v *SecurityIssueRef) GetAlertMessage() string { return v.AlertMessage } + +// GetSuggestions returns SecurityIssueRef.Suggestions, and is useful for accessing the field via an interface. +func (v *SecurityIssueRef) GetSuggestions() string { return v.Suggestions } + +// GetSeverity returns SecurityIssueRef.Severity, and is useful for accessing the field via an interface. +func (v *SecurityIssueRef) GetSeverity() Severity { return v.Severity } + +// GetCreatedAt returns SecurityIssueRef.CreatedAt, and is useful for accessing the field via an interface. +func (v *SecurityIssueRef) GetCreatedAt() *time.Time { return v.CreatedAt } + +// GetUpdatedAt returns SecurityIssueRef.UpdatedAt, and is useful for accessing the field via an interface. +func (v *SecurityIssueRef) GetUpdatedAt() *time.Time { return v.UpdatedAt } + +// GetAction returns SecurityIssueRef.Action, and is useful for accessing the field via an interface. +func (v *SecurityIssueRef) GetAction() string { return v.Action } + +// GetJiraUrl returns SecurityIssueRef.JiraUrl, and is useful for accessing the field via an interface. +func (v *SecurityIssueRef) GetJiraUrl() string { return v.JiraUrl } + +// GetStatus returns SecurityIssueRef.Status, and is useful for accessing the field via an interface. +func (v *SecurityIssueRef) GetStatus() string { return v.Status } + +// GetReason returns SecurityIssueRef.Reason, and is useful for accessing the field via an interface. +func (v *SecurityIssueRef) GetReason() string { return v.Reason } + +// GetError returns SecurityIssueRef.Error, and is useful for accessing the field via an interface. +func (v *SecurityIssueRef) GetError() string { return v.Error } + +// GetAffects returns SecurityIssueRef.Affects, and is useful for accessing the field via an interface. 
+func (v *SecurityIssueRef) GetAffects() []*RunHistoryRef { return v.Affects } + +type Severity string + +const ( + SeverityCritical Severity = "critical" + SeverityHigh Severity = "high" + SeverityMedium Severity = "medium" + SeverityLow Severity = "low" + SeverityInfo Severity = "info" + SeverityNone Severity = "none" + SeverityUnknown Severity = "unknown" +) + +type SourceCodeToolRef struct { + // id is randomly assigned + Id string `json:"id"` + CreatedAt *time.Time `json:"createdAt"` + // scm is the scm tool github/gitlab etc + Scm string `json:"scm"` + // repository is the git remote repository + Repository string `json:"repository"` + // branch is the git branch on which the artifact was built + Branch string `json:"branch"` + // headCommit is the checkout out head commit + HeadCommit string `json:"headCommit"` + // diffCommits is a comma separated string of the commits between the previous built artifact and the current + DiffCommits string `json:"diffCommits"` + LicenseName string `json:"licenseName"` + Visibility string `json:"visibility"` + WorkflowName string `json:"workflowName"` + // parentRepo is populated in case the git repo is a fork + ParentRepo string `json:"parentRepo"` + BuildTool *BuildToolRef `json:"buildTool,omitempty"` +} + +// GetId returns SourceCodeToolRef.Id, and is useful for accessing the field via an interface. +func (v *SourceCodeToolRef) GetId() string { return v.Id } + +// GetCreatedAt returns SourceCodeToolRef.CreatedAt, and is useful for accessing the field via an interface. +func (v *SourceCodeToolRef) GetCreatedAt() *time.Time { return v.CreatedAt } + +// GetScm returns SourceCodeToolRef.Scm, and is useful for accessing the field via an interface. +func (v *SourceCodeToolRef) GetScm() string { return v.Scm } + +// GetRepository returns SourceCodeToolRef.Repository, and is useful for accessing the field via an interface. 
+func (v *SourceCodeToolRef) GetRepository() string { return v.Repository } + +// GetBranch returns SourceCodeToolRef.Branch, and is useful for accessing the field via an interface. +func (v *SourceCodeToolRef) GetBranch() string { return v.Branch } + +// GetHeadCommit returns SourceCodeToolRef.HeadCommit, and is useful for accessing the field via an interface. +func (v *SourceCodeToolRef) GetHeadCommit() string { return v.HeadCommit } + +// GetDiffCommits returns SourceCodeToolRef.DiffCommits, and is useful for accessing the field via an interface. +func (v *SourceCodeToolRef) GetDiffCommits() string { return v.DiffCommits } + +// GetLicenseName returns SourceCodeToolRef.LicenseName, and is useful for accessing the field via an interface. +func (v *SourceCodeToolRef) GetLicenseName() string { return v.LicenseName } + +// GetVisibility returns SourceCodeToolRef.Visibility, and is useful for accessing the field via an interface. +func (v *SourceCodeToolRef) GetVisibility() string { return v.Visibility } + +// GetWorkflowName returns SourceCodeToolRef.WorkflowName, and is useful for accessing the field via an interface. +func (v *SourceCodeToolRef) GetWorkflowName() string { return v.WorkflowName } + +// GetParentRepo returns SourceCodeToolRef.ParentRepo, and is useful for accessing the field via an interface. +func (v *SourceCodeToolRef) GetParentRepo() string { return v.ParentRepo } + +// GetBuildTool returns SourceCodeToolRef.BuildTool, and is useful for accessing the field via an interface. 
+func (v *SourceCodeToolRef) GetBuildTool() *BuildToolRef { return v.BuildTool } + +type TagRef struct { + Id string `json:"id"` + TagName string `json:"tagName"` + TagValue string `json:"tagValue"` + TagDescription string `json:"tagDescription"` + CreatedBy string `json:"createdBy"` + CreatedAt *time.Time `json:"createdAt"` + UpdatedAt *time.Time `json:"updatedAt"` + Policies []*PolicyEnforcementRef `json:"policies,omitempty"` +} + +// GetId returns TagRef.Id, and is useful for accessing the field via an interface. +func (v *TagRef) GetId() string { return v.Id } + +// GetTagName returns TagRef.TagName, and is useful for accessing the field via an interface. +func (v *TagRef) GetTagName() string { return v.TagName } + +// GetTagValue returns TagRef.TagValue, and is useful for accessing the field via an interface. +func (v *TagRef) GetTagValue() string { return v.TagValue } + +// GetTagDescription returns TagRef.TagDescription, and is useful for accessing the field via an interface. +func (v *TagRef) GetTagDescription() string { return v.TagDescription } + +// GetCreatedBy returns TagRef.CreatedBy, and is useful for accessing the field via an interface. +func (v *TagRef) GetCreatedBy() string { return v.CreatedBy } + +// GetCreatedAt returns TagRef.CreatedAt, and is useful for accessing the field via an interface. +func (v *TagRef) GetCreatedAt() *time.Time { return v.CreatedAt } + +// GetUpdatedAt returns TagRef.UpdatedAt, and is useful for accessing the field via an interface. +func (v *TagRef) GetUpdatedAt() *time.Time { return v.UpdatedAt } + +// GetPolicies returns TagRef.Policies, and is useful for accessing the field via an interface. 
+func (v *TagRef) GetPolicies() []*PolicyEnforcementRef { return v.Policies } + +type TeamRef struct { + // id is randomly assigned + Id string `json:"id"` + Name string `json:"name"` + Roles []*RoleRef `json:"roles,omitempty"` + Organization *OrganizationRef `json:"organization,omitempty"` + Applications []*ApplicationRef `json:"applications,omitempty"` + Labels []*KeyValueRef `json:"labels,omitempty"` + Policies []*PolicyDefinitionRef `json:"policies,omitempty"` + PolicyEnforcements []*PolicyEnforcementRef `json:"policyEnforcements,omitempty"` +} + +// GetId returns TeamRef.Id, and is useful for accessing the field via an interface. +func (v *TeamRef) GetId() string { return v.Id } + +// GetName returns TeamRef.Name, and is useful for accessing the field via an interface. +func (v *TeamRef) GetName() string { return v.Name } + +// GetRoles returns TeamRef.Roles, and is useful for accessing the field via an interface. +func (v *TeamRef) GetRoles() []*RoleRef { return v.Roles } + +// GetOrganization returns TeamRef.Organization, and is useful for accessing the field via an interface. +func (v *TeamRef) GetOrganization() *OrganizationRef { return v.Organization } + +// GetApplications returns TeamRef.Applications, and is useful for accessing the field via an interface. +func (v *TeamRef) GetApplications() []*ApplicationRef { return v.Applications } + +// GetLabels returns TeamRef.Labels, and is useful for accessing the field via an interface. +func (v *TeamRef) GetLabels() []*KeyValueRef { return v.Labels } + +// GetPolicies returns TeamRef.Policies, and is useful for accessing the field via an interface. +func (v *TeamRef) GetPolicies() []*PolicyDefinitionRef { return v.Policies } + +// GetPolicyEnforcements returns TeamRef.PolicyEnforcements, and is useful for accessing the field via an interface. 
+func (v *TeamRef) GetPolicyEnforcements() []*PolicyEnforcementRef { return v.PolicyEnforcements } + +type ToolsUsedRef struct { + Id *string `json:"id"` + Source string `json:"source"` + Build string `json:"build"` + Artifact string `json:"artifact"` + Deploy string `json:"deploy"` + Sbom string `json:"sbom"` + Misc []string `json:"misc"` +} + +// GetId returns ToolsUsedRef.Id, and is useful for accessing the field via an interface. +func (v *ToolsUsedRef) GetId() *string { return v.Id } + +// GetSource returns ToolsUsedRef.Source, and is useful for accessing the field via an interface. +func (v *ToolsUsedRef) GetSource() string { return v.Source } + +// GetBuild returns ToolsUsedRef.Build, and is useful for accessing the field via an interface. +func (v *ToolsUsedRef) GetBuild() string { return v.Build } + +// GetArtifact returns ToolsUsedRef.Artifact, and is useful for accessing the field via an interface. +func (v *ToolsUsedRef) GetArtifact() string { return v.Artifact } + +// GetDeploy returns ToolsUsedRef.Deploy, and is useful for accessing the field via an interface. +func (v *ToolsUsedRef) GetDeploy() string { return v.Deploy } + +// GetSbom returns ToolsUsedRef.Sbom, and is useful for accessing the field via an interface. +func (v *ToolsUsedRef) GetSbom() string { return v.Sbom } + +// GetMisc returns ToolsUsedRef.Misc, and is useful for accessing the field via an interface. +func (v *ToolsUsedRef) GetMisc() []string { return v.Misc } + +// UpdateApplicationEnvironmentWithToolsResponse is returned by UpdateApplicationEnvironmentWithTools on success. +type UpdateApplicationEnvironmentWithToolsResponse struct { + UpdateApplicationEnvironment *UpdateApplicationEnvironmentWithToolsUpdateApplicationEnvironmentUpdateApplicationEnvironmentPayload `json:"updateApplicationEnvironment"` +} + +// GetUpdateApplicationEnvironment returns UpdateApplicationEnvironmentWithToolsResponse.UpdateApplicationEnvironment, and is useful for accessing the field via an interface. 
+func (v *UpdateApplicationEnvironmentWithToolsResponse) GetUpdateApplicationEnvironment() *UpdateApplicationEnvironmentWithToolsUpdateApplicationEnvironmentUpdateApplicationEnvironmentPayload { + return v.UpdateApplicationEnvironment +} + +// UpdateApplicationEnvironmentWithToolsUpdateApplicationEnvironmentUpdateApplicationEnvironmentPayload includes the requested fields of the GraphQL type UpdateApplicationEnvironmentPayload. +type UpdateApplicationEnvironmentWithToolsUpdateApplicationEnvironmentUpdateApplicationEnvironmentPayload struct { + NumUids *int `json:"numUids"` +} + +// GetNumUids returns UpdateApplicationEnvironmentWithToolsUpdateApplicationEnvironmentUpdateApplicationEnvironmentPayload.NumUids, and is useful for accessing the field via an interface. +func (v *UpdateApplicationEnvironmentWithToolsUpdateApplicationEnvironmentUpdateApplicationEnvironmentPayload) GetNumUids() *int { + return v.NumUids +} + +// UpdateArtifactWDeploymentIdsResponse is returned by UpdateArtifactWDeploymentIds on success. +type UpdateArtifactWDeploymentIdsResponse struct { + UpdateArtifact *UpdateArtifactWDeploymentIdsUpdateArtifactUpdateArtifactPayload `json:"updateArtifact"` +} + +// GetUpdateArtifact returns UpdateArtifactWDeploymentIdsResponse.UpdateArtifact, and is useful for accessing the field via an interface. +func (v *UpdateArtifactWDeploymentIdsResponse) GetUpdateArtifact() *UpdateArtifactWDeploymentIdsUpdateArtifactUpdateArtifactPayload { + return v.UpdateArtifact +} + +// UpdateArtifactWDeploymentIdsUpdateArtifactUpdateArtifactPayload includes the requested fields of the GraphQL type UpdateArtifactPayload. +type UpdateArtifactWDeploymentIdsUpdateArtifactUpdateArtifactPayload struct { + NumUids *int `json:"numUids"` +} + +// GetNumUids returns UpdateArtifactWDeploymentIdsUpdateArtifactUpdateArtifactPayload.NumUids, and is useful for accessing the field via an interface. 
+func (v *UpdateArtifactWDeploymentIdsUpdateArtifactUpdateArtifactPayload) GetNumUids() *int { + return v.NumUids +} + +// UpdateRunHistoryResponse is returned by UpdateRunHistory on success. +type UpdateRunHistoryResponse struct { + UpdateRunHistory *UpdateRunHistoryUpdateRunHistoryUpdateRunHistoryPayload `json:"updateRunHistory"` +} + +// GetUpdateRunHistory returns UpdateRunHistoryResponse.UpdateRunHistory, and is useful for accessing the field via an interface. +func (v *UpdateRunHistoryResponse) GetUpdateRunHistory() *UpdateRunHistoryUpdateRunHistoryUpdateRunHistoryPayload { + return v.UpdateRunHistory +} + +// UpdateRunHistoryUpdateRunHistoryUpdateRunHistoryPayload includes the requested fields of the GraphQL type UpdateRunHistoryPayload. +type UpdateRunHistoryUpdateRunHistoryUpdateRunHistoryPayload struct { + NumUids *int `json:"numUids"` +} + +// GetNumUids returns UpdateRunHistoryUpdateRunHistoryUpdateRunHistoryPayload.NumUids, and is useful for accessing the field via an interface. +func (v *UpdateRunHistoryUpdateRunHistoryUpdateRunHistoryPayload) GetNumUids() *int { return v.NumUids } + +// UpdateSecurityIssueResponse is returned by UpdateSecurityIssue on success. +type UpdateSecurityIssueResponse struct { + UpdateSecurityIssue *UpdateSecurityIssueUpdateSecurityIssueUpdateSecurityIssuePayload `json:"updateSecurityIssue"` +} + +// GetUpdateSecurityIssue returns UpdateSecurityIssueResponse.UpdateSecurityIssue, and is useful for accessing the field via an interface. +func (v *UpdateSecurityIssueResponse) GetUpdateSecurityIssue() *UpdateSecurityIssueUpdateSecurityIssueUpdateSecurityIssuePayload { + return v.UpdateSecurityIssue +} + +// UpdateSecurityIssueUpdateSecurityIssueUpdateSecurityIssuePayload includes the requested fields of the GraphQL type UpdateSecurityIssuePayload. 
+type UpdateSecurityIssueUpdateSecurityIssueUpdateSecurityIssuePayload struct { + NumUids *int `json:"numUids"` +} + +// GetNumUids returns UpdateSecurityIssueUpdateSecurityIssueUpdateSecurityIssuePayload.NumUids, and is useful for accessing the field via an interface. +func (v *UpdateSecurityIssueUpdateSecurityIssueUpdateSecurityIssuePayload) GetNumUids() *int { + return v.NumUids +} + +type VulnerabilityRef struct { + Id string `json:"id"` + Parent string `json:"parent"` + Ratings Severity `json:"ratings"` + Cwes []*CWERef `json:"cwes,omitempty"` + Summary string `json:"summary"` + Detail string `json:"detail"` + Recommendation string `json:"recommendation"` + Published *time.Time `json:"published"` + Modified *time.Time `json:"modified"` + CreatedAt *time.Time `json:"createdAt"` + Cvss float64 `json:"cvss"` + Priority string `json:"priority"` + Epss float64 `json:"epss"` + Cisa_kev string `json:"cisa_kev"` + Affects []*ComponentRef `json:"affects,omitempty"` +} + +// GetId returns VulnerabilityRef.Id, and is useful for accessing the field via an interface. +func (v *VulnerabilityRef) GetId() string { return v.Id } + +// GetParent returns VulnerabilityRef.Parent, and is useful for accessing the field via an interface. +func (v *VulnerabilityRef) GetParent() string { return v.Parent } + +// GetRatings returns VulnerabilityRef.Ratings, and is useful for accessing the field via an interface. +func (v *VulnerabilityRef) GetRatings() Severity { return v.Ratings } + +// GetCwes returns VulnerabilityRef.Cwes, and is useful for accessing the field via an interface. +func (v *VulnerabilityRef) GetCwes() []*CWERef { return v.Cwes } + +// GetSummary returns VulnerabilityRef.Summary, and is useful for accessing the field via an interface. +func (v *VulnerabilityRef) GetSummary() string { return v.Summary } + +// GetDetail returns VulnerabilityRef.Detail, and is useful for accessing the field via an interface. 
+func (v *VulnerabilityRef) GetDetail() string { return v.Detail } + +// GetRecommendation returns VulnerabilityRef.Recommendation, and is useful for accessing the field via an interface. +func (v *VulnerabilityRef) GetRecommendation() string { return v.Recommendation } + +// GetPublished returns VulnerabilityRef.Published, and is useful for accessing the field via an interface. +func (v *VulnerabilityRef) GetPublished() *time.Time { return v.Published } + +// GetModified returns VulnerabilityRef.Modified, and is useful for accessing the field via an interface. +func (v *VulnerabilityRef) GetModified() *time.Time { return v.Modified } + +// GetCreatedAt returns VulnerabilityRef.CreatedAt, and is useful for accessing the field via an interface. +func (v *VulnerabilityRef) GetCreatedAt() *time.Time { return v.CreatedAt } + +// GetCvss returns VulnerabilityRef.Cvss, and is useful for accessing the field via an interface. +func (v *VulnerabilityRef) GetCvss() float64 { return v.Cvss } + +// GetPriority returns VulnerabilityRef.Priority, and is useful for accessing the field via an interface. +func (v *VulnerabilityRef) GetPriority() string { return v.Priority } + +// GetEpss returns VulnerabilityRef.Epss, and is useful for accessing the field via an interface. +func (v *VulnerabilityRef) GetEpss() float64 { return v.Epss } + +// GetCisa_kev returns VulnerabilityRef.Cisa_kev, and is useful for accessing the field via an interface. +func (v *VulnerabilityRef) GetCisa_kev() string { return v.Cisa_kev } + +// GetAffects returns VulnerabilityRef.Affects, and is useful for accessing the field via an interface. +func (v *VulnerabilityRef) GetAffects() []*ComponentRef { return v.Affects } + +// __AddSecurityIssueInput is used internally by genqlient +type __AddSecurityIssueInput struct { + Input *AddSecurityIssueInput `json:"input,omitempty"` +} + +// GetInput returns __AddSecurityIssueInput.Input, and is useful for accessing the field via an interface. 
+func (v *__AddSecurityIssueInput) GetInput() *AddSecurityIssueInput { return v.Input } + +// __QuerySecurityIssueInput is used internally by genqlient +type __QuerySecurityIssueInput struct { + AlertTitle string `json:"alertTitle"` + AlertMsg string `json:"alertMsg"` + Suggestion string `json:"suggestion"` + Severity Severity `json:"severity"` + ErrorMsg string `json:"errorMsg"` + Action string `json:"action"` +} + +// GetAlertTitle returns __QuerySecurityIssueInput.AlertTitle, and is useful for accessing the field via an interface. +func (v *__QuerySecurityIssueInput) GetAlertTitle() string { return v.AlertTitle } + +// GetAlertMsg returns __QuerySecurityIssueInput.AlertMsg, and is useful for accessing the field via an interface. +func (v *__QuerySecurityIssueInput) GetAlertMsg() string { return v.AlertMsg } + +// GetSuggestion returns __QuerySecurityIssueInput.Suggestion, and is useful for accessing the field via an interface. +func (v *__QuerySecurityIssueInput) GetSuggestion() string { return v.Suggestion } + +// GetSeverity returns __QuerySecurityIssueInput.Severity, and is useful for accessing the field via an interface. +func (v *__QuerySecurityIssueInput) GetSeverity() Severity { return v.Severity } + +// GetErrorMsg returns __QuerySecurityIssueInput.ErrorMsg, and is useful for accessing the field via an interface. +func (v *__QuerySecurityIssueInput) GetErrorMsg() string { return v.ErrorMsg } + +// GetAction returns __QuerySecurityIssueInput.Action, and is useful for accessing the field via an interface. +func (v *__QuerySecurityIssueInput) GetAction() string { return v.Action } + +// __UpdateApplicationEnvironmentWithToolsInput is used internally by genqlient +type __UpdateApplicationEnvironmentWithToolsInput struct { + Id string `json:"id"` + Tools []string `json:"tools"` +} + +// GetId returns __UpdateApplicationEnvironmentWithToolsInput.Id, and is useful for accessing the field via an interface. 
+func (v *__UpdateApplicationEnvironmentWithToolsInput) GetId() string { return v.Id } + +// GetTools returns __UpdateApplicationEnvironmentWithToolsInput.Tools, and is useful for accessing the field via an interface. +func (v *__UpdateApplicationEnvironmentWithToolsInput) GetTools() []string { return v.Tools } + +// __UpdateArtifactWDeploymentIdsInput is used internally by genqlient +type __UpdateArtifactWDeploymentIdsInput struct { + ArtifactId string `json:"artifactId"` + DeploymentRefs []*ApplicationDeploymentRef `json:"deploymentRefs,omitempty"` +} + +// GetArtifactId returns __UpdateArtifactWDeploymentIdsInput.ArtifactId, and is useful for accessing the field via an interface. +func (v *__UpdateArtifactWDeploymentIdsInput) GetArtifactId() string { return v.ArtifactId } + +// GetDeploymentRefs returns __UpdateArtifactWDeploymentIdsInput.DeploymentRefs, and is useful for accessing the field via an interface. +func (v *__UpdateArtifactWDeploymentIdsInput) GetDeploymentRefs() []*ApplicationDeploymentRef { + return v.DeploymentRefs +} + +// __UpdateRunHistoryInput is used internally by genqlient +type __UpdateRunHistoryInput struct { + RunHistoryId *string `json:"runHistoryId"` + SecurityIssueId *string `json:"securityIssueId"` +} + +// GetRunHistoryId returns __UpdateRunHistoryInput.RunHistoryId, and is useful for accessing the field via an interface. +func (v *__UpdateRunHistoryInput) GetRunHistoryId() *string { return v.RunHistoryId } + +// GetSecurityIssueId returns __UpdateRunHistoryInput.SecurityIssueId, and is useful for accessing the field via an interface. 
+func (v *__UpdateRunHistoryInput) GetSecurityIssueId() *string { return v.SecurityIssueId } + +// __UpdateSecurityIssueInput is used internally by genqlient +type __UpdateSecurityIssueInput struct { + SecurityIssue *string `json:"securityIssue"` + CreatedAt *time.Time `json:"createdAt"` + UpdatedAt *time.Time `json:"updatedAt"` +} + +// GetSecurityIssue returns __UpdateSecurityIssueInput.SecurityIssue, and is useful for accessing the field via an interface. +func (v *__UpdateSecurityIssueInput) GetSecurityIssue() *string { return v.SecurityIssue } + +// GetCreatedAt returns __UpdateSecurityIssueInput.CreatedAt, and is useful for accessing the field via an interface. +func (v *__UpdateSecurityIssueInput) GetCreatedAt() *time.Time { return v.CreatedAt } + +// GetUpdatedAt returns __UpdateSecurityIssueInput.UpdatedAt, and is useful for accessing the field via an interface. +func (v *__UpdateSecurityIssueInput) GetUpdatedAt() *time.Time { return v.UpdatedAt } + +// The query or mutation executed by AddSecurityIssue. +const AddSecurityIssue_Operation = ` +mutation AddSecurityIssue ($input: AddSecurityIssueInput!) { + addSecurityIssue(input: [$input]) { + securityIssue { + id + } + } +} +` + +func AddSecurityIssue( + ctx_ context.Context, + client_ graphql.Client, + input *AddSecurityIssueInput, +) (*AddSecurityIssueResponse, error) { + req_ := &graphql.Request{ + OpName: "AddSecurityIssue", + Query: AddSecurityIssue_Operation, + Variables: &__AddSecurityIssueInput{ + Input: input, + }, + } + var err_ error + + var data_ AddSecurityIssueResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by AppEnvTools. 
+const AppEnvTools_Operation = ` +query AppEnvTools { + queryApplicationEnvironment { + id + deployments(order: {asc:deployedAt}) { + id + policyRunHistory(order: {asc:CreatedAt}) { + id + DatasourceTool + } + } + } +} +` + +func AppEnvTools( + ctx_ context.Context, + client_ graphql.Client, +) (*AppEnvToolsResponse, error) { + req_ := &graphql.Request{ + OpName: "AppEnvTools", + Query: AppEnvTools_Operation, + } + var err_ error + + var data_ AppEnvToolsResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by QueryApplicationDeploymentWArtifact. +const QueryApplicationDeploymentWArtifact_Operation = ` +query QueryApplicationDeploymentWArtifact { + queryApplicationDeployment(order: {asc:deployedAt}) { + id + artifact { + id + artifactDeployment { + id + } + } + } +} +` + +func QueryApplicationDeploymentWArtifact( + ctx_ context.Context, + client_ graphql.Client, +) (*QueryApplicationDeploymentWArtifactResponse, error) { + req_ := &graphql.Request{ + OpName: "QueryApplicationDeploymentWArtifact", + Query: QueryApplicationDeploymentWArtifact_Operation, + } + var err_ error + + var data_ QueryApplicationDeploymentWArtifactResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by QuerySecurityIssue. +const QuerySecurityIssue_Operation = ` +query QuerySecurityIssue ($alertTitle: String!, $alertMsg: String!, $suggestion: String!, $severity: Severity!, $errorMsg: String!, $action: String!) 
{ + querySecurityIssue(filter: {AlertTitle:{eq:$alertTitle},AlertMessage:{eq:$alertMsg},Suggestions:{eq:$suggestion},Severity:{eq:$severity},Action:{eq:$action},Error:{eq:$errorMsg}}) { + id + CreatedAt + UpdatedAt + } +} +` + +func QuerySecurityIssue( + ctx_ context.Context, + client_ graphql.Client, + alertTitle string, + alertMsg string, + suggestion string, + severity Severity, + errorMsg string, + action string, +) (*QuerySecurityIssueResponse, error) { + req_ := &graphql.Request{ + OpName: "QuerySecurityIssue", + Query: QuerySecurityIssue_Operation, + Variables: &__QuerySecurityIssueInput{ + AlertTitle: alertTitle, + AlertMsg: alertMsg, + Suggestion: suggestion, + Severity: severity, + ErrorMsg: errorMsg, + Action: action, + }, + } + var err_ error + + var data_ QuerySecurityIssueResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by UpdateApplicationEnvironmentWithTools. +const UpdateApplicationEnvironmentWithTools_Operation = ` +mutation UpdateApplicationEnvironmentWithTools ($id: String!, $tools: [String!]!) { + updateApplicationEnvironment(input: {filter:{id:{eq:$id}},set:{toolsUsed:$tools}}) { + numUids + } +} +` + +func UpdateApplicationEnvironmentWithTools( + ctx_ context.Context, + client_ graphql.Client, + id string, + tools []string, +) (*UpdateApplicationEnvironmentWithToolsResponse, error) { + req_ := &graphql.Request{ + OpName: "UpdateApplicationEnvironmentWithTools", + Query: UpdateApplicationEnvironmentWithTools_Operation, + Variables: &__UpdateApplicationEnvironmentWithToolsInput{ + Id: id, + Tools: tools, + }, + } + var err_ error + + var data_ UpdateApplicationEnvironmentWithToolsResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by UpdateArtifactWDeploymentIds. 
+const UpdateArtifactWDeploymentIds_Operation = ` +mutation UpdateArtifactWDeploymentIds ($artifactId: String!, $deploymentRefs: [ApplicationDeploymentRef!]!) { + updateArtifact(input: {filter:{id:{eq:$artifactId}},set:{artifactDeployment:$deploymentRefs}}) { + numUids + } +} +` + +func UpdateArtifactWDeploymentIds( + ctx_ context.Context, + client_ graphql.Client, + artifactId string, + deploymentRefs []*ApplicationDeploymentRef, +) (*UpdateArtifactWDeploymentIdsResponse, error) { + req_ := &graphql.Request{ + OpName: "UpdateArtifactWDeploymentIds", + Query: UpdateArtifactWDeploymentIds_Operation, + Variables: &__UpdateArtifactWDeploymentIdsInput{ + ArtifactId: artifactId, + DeploymentRefs: deploymentRefs, + }, + } + var err_ error + + var data_ UpdateArtifactWDeploymentIdsResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by UpdateRunHistory. +const UpdateRunHistory_Operation = ` +mutation UpdateRunHistory ($runHistoryId: ID!, $securityIssueId: ID!) { + updateRunHistory(input: {filter:{id:[$runHistoryId]},set:{securityIssue:{id:$securityIssueId}}}) { + numUids + } +} +` + +func UpdateRunHistory( + ctx_ context.Context, + client_ graphql.Client, + runHistoryId *string, + securityIssueId *string, +) (*UpdateRunHistoryResponse, error) { + req_ := &graphql.Request{ + OpName: "UpdateRunHistory", + Query: UpdateRunHistory_Operation, + Variables: &__UpdateRunHistoryInput{ + RunHistoryId: runHistoryId, + SecurityIssueId: securityIssueId, + }, + } + var err_ error + + var data_ UpdateRunHistoryResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by UpdateSecurityIssue. 
+const UpdateSecurityIssue_Operation = ` +mutation UpdateSecurityIssue ($securityIssue: ID!, $createdAt: DateTime, $updatedAt: DateTime) { + updateSecurityIssue(input: {set:{CreatedAt:$createdAt,UpdatedAt:$updatedAt},filter:{id:[$securityIssue]}}) { + numUids + } +} +` + +func UpdateSecurityIssue( + ctx_ context.Context, + client_ graphql.Client, + securityIssue *string, + createdAt *time.Time, + updatedAt *time.Time, +) (*UpdateSecurityIssueResponse, error) { + req_ := &graphql.Request{ + OpName: "UpdateSecurityIssue", + Query: UpdateSecurityIssue_Operation, + Variables: &__UpdateSecurityIssueInput{ + SecurityIssue: securityIssue, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + } + var err_ error + + var data_ UpdateSecurityIssueResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} diff --git a/april2024june2024/june2024/schema.graphql b/april2024june2024/june2024/schema.graphql new file mode 100644 index 0000000..dc40d4a --- /dev/null +++ b/april2024june2024/june2024/schema.graphql @@ -0,0 +1,4656 @@ +directive @auth(password: AuthRule, query: AuthRule, add: AuthRule, update: AuthRule, delete: AuthRule) on OBJECT | INTERFACE + +directive @hasInverse(field: String!) on FIELD_DEFINITION + +directive @search(by: [DgraphIndex!]) on FIELD_DEFINITION + +directive @id(interface: Boolean) on FIELD_DEFINITION + +directive @secret(field: String!, pred: String) on OBJECT | INTERFACE + +directive @remote on OBJECT | INTERFACE | UNION | INPUT_OBJECT | ENUM + +directive @lambda on FIELD_DEFINITION + +directive @custom(http: CustomHTTP, dql: String) on FIELD_DEFINITION + +directive @remoteResponse(name: String) on FIELD_DEFINITION + +directive @withSubscription on OBJECT | INTERFACE | FIELD_DEFINITION + +directive @lambdaOnMutate(add: Boolean, update: Boolean, delete: Boolean) on OBJECT | INTERFACE + +directive @cacheControl(maxAge: Int!) 
on QUERY + +directive @generate(query: GenerateQueryParams, mutation: GenerateMutationParams, subscription: Boolean) on OBJECT | INTERFACE + +directive @dgraph(type: String, pred: String) on OBJECT | INTERFACE | FIELD_DEFINITION + +directive @cascade(fields: [String]) on FIELD + +input AddApplicationDeploymentInput { + """id is randomly assigned""" + id: String! + artifact: [ArtifactRef!] + applicationEnvironment: ApplicationEnvironmentRef! + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage! + + """source is argo, spinnaker etc""" + source: String! + + """component would be a service""" + component: String! + + """user who deployed the artifact""" + deployedBy: String + toolsUsed: ToolsUsedRef! + deploymentRisk: ApplicationDeploymentRiskRef + policyRunHistory: [RunHistoryRef!] +} + +type AddApplicationDeploymentPayload { + applicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + numUids: Int +} + +input AddApplicationDeploymentRiskInput { + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment: ApplicationDeploymentRef! +} + +type AddApplicationDeploymentRiskPayload { + applicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + numUids: Int +} + +input AddApplicationEnvironmentInput { + """id is randomly assigned""" + id: String! + environment: EnvironmentRef + application: ApplicationRef! + deploymentTarget: DeploymentTargetRef! + namespace: String! + + """ + toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + """ + toolsUsed: [String!] + deployments: [ApplicationDeploymentRef!] 
+ riskStatus: ApplicationRiskStatusRef + metadata: [KeyValueRef!] +} + +type AddApplicationEnvironmentPayload { + applicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + numUids: Int +} + +input AddApplicationInput { + """id is randomly assigned""" + id: String! + name: String! + roles: [RoleRef!] + environments: [ApplicationEnvironmentRef!] + team: TeamRef! + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + metadata: [KeyValueRef!] +} + +type AddApplicationPayload { + application(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + numUids: Int +} + +input AddApplicationRiskStatusInput { + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime! + updatedAt: DateTime! + applicationEnvironment: ApplicationEnvironmentRef! +} + +type AddApplicationRiskStatusPayload { + applicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + numUids: Int +} + +input AddArtifactInput { + id: String! + artifactType: String! + artifactName: String! + artifactTag: String! + artifactSha: String! + scanData: [ArtifactScanDataRef!] + artifactDeployment: [ApplicationDeploymentRef!] + buildDetails: BuildToolRef +} + +type AddArtifactPayload { + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + numUids: Int +} + +input AddArtifactScanDataInput { + id: String! + artifactSha: String! + tool: String! + artifactDetails: ArtifactRef + lastScannedAt: DateTime + createdAt: DateTime + vulnTrackingId: String + components: [ComponentRef!] 
+ vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int + sbomUrl: String + artifactLicenseScanUrl: String + artifactSecretScanUrl: String + sourceLicenseScanUrl: String + sourceSecretScanUrl: String + sourceScorecardScanUrl: String + sourceSemgrepHighSeverityScanUrl: String + sourceSemgrepMediumSeverityScanUrl: String + sourceSemgrepLowSeverityScanUrl: String + sourceSnykScanUrl: String + virusTotalUrlScan: String + riskStatus: RiskStatus + artifactRunHistory: [RunHistoryRef!] +} + +type AddArtifactScanDataPayload { + artifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + numUids: Int +} + +input AddBuildToolInput { + """id is randomly assigned""" + id: String! + + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String! + + """tool is jenkins etc""" + tool: String! + + """buildName is the name of the job/pipeline/action""" + buildName: String! + buildUrl: String! + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String! + + """artifactTag would be the tag of the artifact""" + artifactTag: String! + + """digest is the sha of the artifact""" + digest: String! + + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + artifactNode: ArtifactRef + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + sourceCodeTool: SourceCodeToolRef + commitMetaData: [CommitMetaDataRef!] + createdAt: DateTime! 
+} + +type AddBuildToolPayload { + buildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + numUids: Int +} + +input AddCommitMetaDataInput { + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool: BuildToolRef! +} + +type AddCommitMetaDataPayload { + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + numUids: Int +} + +input AddComponentInput { + id: String! + type: String! + name: String! + version: String! + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities: [VulnerabilityRef!] + artifacts: [ArtifactScanDataRef!] +} + +type AddComponentPayload { + component(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + numUids: Int +} + +input AddCredentialsInput { + data: String! + integrator: IntegratorRef! +} + +type AddCredentialsPayload { + credentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + numUids: Int +} + +input AddCWEInput { + id: String! + name: String! + description: String +} + +type AddCWEPayload { + cWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + numUids: Int +} + +input AddDeploymentTargetInput { + """id is randomly assigned""" + id: String! + name: String! + + """this would be the ip/server address of the target environment""" + ip: String! + account: String + + """this would be something like aws, gcp etc""" + targetType: String + + """this would be something like us-east-1 etc""" + region: String + kubescapeServiceConnected: String + isFirewall: Boolean + organization: OrganizationRef! + defaultEnvironment: EnvironmentRef! 
+} + +type AddDeploymentTargetPayload { + deploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + numUids: Int +} + +input AddEnvironmentInput { + id: String! + organization: OrganizationRef! + purpose: String! +} + +type AddEnvironmentPayload { + environment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + numUids: Int +} + +input AddFeatureModeInput { + id: String! + organization: OrganizationRef! + scan: String! + type: String! + enabled: Boolean! + category: String! + createdAt: DateTime! + updatedAt: DateTime! +} + +type AddFeatureModePayload { + featureMode(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode] + numUids: Int +} + +input AddIntegratorInput { + id: String! + organization: OrganizationRef! + name: String! + type: String! + category: String! + credentials: CredentialsRef! + createdAt: DateTime! + updatedAt: DateTime! +} + +type AddIntegratorPayload { + integrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + numUids: Int +} + +input AddKeyValueInput { + id: String! + name: String! + value: String! +} + +type AddKeyValuePayload { + keyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + numUids: Int +} + +input AddOrganizationInput { + """id is randomly assigned""" + id: String! + name: String! + roles: [RoleRef!] + teams: [TeamRef!] + environments: [DeploymentTargetRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + integrators: [IntegratorRef!] + featureModes: [FeatureModeRef!] +} + +type AddOrganizationPayload { + organization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + numUids: Int +} + +input AddPolicyDefinitionInput { + id: String! + ownerOrg: OrganizationRef! 
+ ownerTeam: TeamRef + ownerApplication: ApplicationRef + createdAt: DateTime! + updatedAt: DateTime! + policyName: String! + category: String! + stage: String! + description: String! + scheduledPolicy: Boolean! + script: String! + variables: String + conditionName: String + suggestion: String +} + +type AddPolicyDefinitionPayload { + policyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + numUids: Int +} + +input AddPolicyEnforcementInput { + policy: PolicyDefinitionRef! + enforcedOrg: OrganizationRef + enforcedTeam: TeamRef + enforcedApplication: ApplicationRef + status: Boolean! + forceApply: Boolean + severity: Severity! + datasourceTool: String! + action: String! + conditionValue: String + environments: [EnvironmentRef!] + tags: [TagRef!] + createdAt: DateTime! + updatedAt: DateTime! +} + +type AddPolicyEnforcementPayload { + policyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + numUids: Int +} + +input AddRoleInput { + """id is randomly assigned""" + id: String! + + """group should be a URI format that includes a scope or realm""" + group: String! + permission: RolePermission! +} + +type AddRolePayload { + role(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + numUids: Int +} + +input AddRunHistoryInput { + policyId: String! + applicationDeployment: ApplicationDeploymentRef + artifactScan: ArtifactScanDataRef + PolicyName: String! + Stage: String! + Artifact: String! + ArtifactTag: String! + ArtifactSha: String! + ArtifactNameTag: String! + DatasourceTool: String! + CreatedAt: DateTime! + UpdatedAt: DateTime! + DeployedAt: DateTime! + Hash: String + Pass: Boolean! + MetaData: String + FileApi: String + scheduledPolicy: Boolean! + policyEnforcements: PolicyEnforcementRef! 
+ securityIssue: SecurityIssueRef
+}
+
+type AddRunHistoryPayload {
+ runHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory]
+ numUids: Int
+}
+
+input AddSchemaVersionInput {
+ version: String!
+}
+
+type AddSchemaVersionPayload {
+ schemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion]
+ numUids: Int
+}
+
+input AddSecurityIssueInput {
+ AlertTitle: String
+ AlertMessage: String
+ Suggestions: String
+ Severity: Severity!
+ CreatedAt: DateTime!
+ UpdatedAt: DateTime!
+ Action: String!
+ JiraUrl: String
+ Status: String!
+ Reason: String
+ Error: String
+ Affects: [RunHistoryRef!]
+}
+
+type AddSecurityIssuePayload {
+ securityIssue(filter: SecurityIssueFilter, order: SecurityIssueOrder, first: Int, offset: Int): [SecurityIssue]
+ numUids: Int
+}
+
+input AddSourceCodeToolInput {
+ """id is randomly assigned"""
+ id: String!
+ createdAt: DateTime!
+
+ """scm is the scm tool github/gitlab etc"""
+ scm: String!
+
+ """repository is the git remote repository"""
+ repository: String!
+
+ """branch is the git branch on which the artifact was built"""
+ branch: String!
+
+ """headCommit is the checked out head commit"""
+ headCommit: String
+
+ """
+ diffCommits is a comma separated string of the commits between the previous built artifact and the current
+ """
+ diffCommits: String
+ licenseName: String
+ visibility: String
+ workflowName: String
+
+ """parentRepo is populated in case the git repo is a fork"""
+ parentRepo: String
+ buildTool: BuildToolRef!
+}
+
+type AddSourceCodeToolPayload {
+ sourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool]
+ numUids: Int
+}
+
+input AddTagInput {
+ id: String!
+ tagName: String!
+ tagValue: String!
+ tagDescription: String
+ createdBy: String
+ createdAt: DateTime!
+ updatedAt: DateTime!
+ policies: [PolicyEnforcementRef!]
+} + +type AddTagPayload { + tag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + numUids: Int +} + +input AddTeamInput { + """id is randomly assigned""" + id: String! + name: String! + roles: [RoleRef!] + organization: OrganizationRef! + applications: [ApplicationRef!] + labels: [KeyValueRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] +} + +type AddTeamPayload { + team(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + numUids: Int +} + +input AddToolsUsedInput { + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +type AddToolsUsedPayload { + toolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + numUids: Int +} + +input AddVulnerabilityInput { + id: String! + parent: String! + ratings: Severity + cwes: [CWERef!] + summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: Float + priority: String + epss: Float + cisa_kev: String + affects: [ComponentRef!] +} + +type AddVulnerabilityPayload { + vulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + numUids: Int +} + +type Application implements RBAC { + """id is randomly assigned""" + id: String! + name: String! + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + environments(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment!] + team(filter: TeamFilter): Team! + policies(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition!] + policyEnforcements(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] + metadata(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue!] 
+ rolesAggregate(filter: RoleFilter): RoleAggregateResult
+ environmentsAggregate(filter: ApplicationEnvironmentFilter): ApplicationEnvironmentAggregateResult
+ policiesAggregate(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult
+ policyEnforcementsAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult
+ metadataAggregate(filter: KeyValueFilter): KeyValueAggregateResult
+}
+
+type ApplicationAggregateResult {
+ count: Int
+ idMin: String
+ idMax: String
+ nameMin: String
+ nameMax: String
+}
+
+"""
+ApplicationDeployment tells us about the artifact deployed along with its associated details.
+"""
+type ApplicationDeployment {
+ """id is randomly assigned"""
+ id: String!
+
+ """artifact that is deployed"""
+ artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact!]
+ applicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironment!
+ deployedAt: DateTime
+
+ """
+ deploymentStage is an enum and can be discovered, current, previous or blocked
+ """
+ deploymentStage: DeploymentStage!
+
+ """source is argo, spinnaker etc"""
+ source: String!
+
+ """component would be a service"""
+ component: String!
+
+ """user who deployed the artifact"""
+ deployedBy: String
+
+ """
+ toolsUsed contains tools of different stages of source, build, artifact and deploy along with some different tools
+ """
+ toolsUsed(filter: ToolsUsedFilter): ToolsUsed!
+
+ """deploymentRisk is the risk status of the deployment"""
+ deploymentRisk(filter: ApplicationDeploymentRiskFilter): ApplicationDeploymentRisk
+
+ """policyRunHistory is the policy execution history for this deployment"""
+ policyRunHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory!]
+ artifactAggregate(filter: ArtifactFilter): ArtifactAggregateResult + policyRunHistoryAggregate(filter: RunHistoryFilter): RunHistoryAggregateResult +} + +type ApplicationDeploymentAggregateResult { + count: Int + idMin: String + idMax: String + deployedAtMin: DateTime + deployedAtMax: DateTime + sourceMin: String + sourceMax: String + componentMin: String + componentMax: String + deployedByMin: String + deployedByMax: String +} + +input ApplicationDeploymentFilter { + id: StringHashFilter + deployedAt: DateTimeFilter + deploymentStage: DeploymentStage_exact + component: StringExactFilter_StringRegExpFilter + has: [ApplicationDeploymentHasFilter] + and: [ApplicationDeploymentFilter] + or: [ApplicationDeploymentFilter] + not: ApplicationDeploymentFilter +} + +enum ApplicationDeploymentHasFilter { + id + artifact + applicationEnvironment + deployedAt + deploymentStage + source + component + deployedBy + toolsUsed + deploymentRisk + policyRunHistory +} + +input ApplicationDeploymentOrder { + asc: ApplicationDeploymentOrderable + desc: ApplicationDeploymentOrderable + then: ApplicationDeploymentOrder +} + +enum ApplicationDeploymentOrderable { + id + deployedAt + source + component + deployedBy +} + +input ApplicationDeploymentPatch { + artifact: [ArtifactRef!] + applicationEnvironment: ApplicationEnvironmentRef + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage + + """source is argo, spinnaker etc""" + source: String + + """component would be a service""" + component: String + + """user who deployed the artifact""" + deployedBy: String + toolsUsed: ToolsUsedRef + deploymentRisk: ApplicationDeploymentRiskRef + policyRunHistory: [RunHistoryRef!] +} + +input ApplicationDeploymentRef { + """id is randomly assigned""" + id: String + artifact: [ArtifactRef!] 
+ applicationEnvironment: ApplicationEnvironmentRef + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage + + """source is argo, spinnaker etc""" + source: String + + """component would be a service""" + component: String + + """user who deployed the artifact""" + deployedBy: String + toolsUsed: ToolsUsedRef + deploymentRisk: ApplicationDeploymentRiskRef + policyRunHistory: [RunHistoryRef!] +} + +""" +ApplicationDeploymentRisk tells us about the risk status and alerts for different stages for an application deployment. +""" +type ApplicationDeploymentRisk { + id: ID! + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment(filter: ApplicationDeploymentFilter): ApplicationDeployment! +} + +type ApplicationDeploymentRiskAggregateResult { + count: Int + sourceCodeAlertsScoreMin: Int + sourceCodeAlertsScoreMax: Int + sourceCodeAlertsScoreSum: Int + sourceCodeAlertsScoreAvg: Float + buildAlertsScoreMin: Int + buildAlertsScoreMax: Int + buildAlertsScoreSum: Int + buildAlertsScoreAvg: Float + artifactAlertsScoreMin: Int + artifactAlertsScoreMax: Int + artifactAlertsScoreSum: Int + artifactAlertsScoreAvg: Float + deploymentAlertsScoreMin: Int + deploymentAlertsScoreMax: Int + deploymentAlertsScoreSum: Int + deploymentAlertsScoreAvg: Float +} + +input ApplicationDeploymentRiskFilter { + id: [ID!] 
+ deploymentRiskStatus: RiskStatus_exact_StringRegExpFilter + has: [ApplicationDeploymentRiskHasFilter] + and: [ApplicationDeploymentRiskFilter] + or: [ApplicationDeploymentRiskFilter] + not: ApplicationDeploymentRiskFilter +} + +enum ApplicationDeploymentRiskHasFilter { + sourceCodeAlertsScore + buildAlertsScore + artifactAlertsScore + deploymentAlertsScore + deploymentRiskStatus + applicationDeployment +} + +input ApplicationDeploymentRiskOrder { + asc: ApplicationDeploymentRiskOrderable + desc: ApplicationDeploymentRiskOrderable + then: ApplicationDeploymentRiskOrder +} + +enum ApplicationDeploymentRiskOrderable { + sourceCodeAlertsScore + buildAlertsScore + artifactAlertsScore + deploymentAlertsScore +} + +input ApplicationDeploymentRiskPatch { + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment: ApplicationDeploymentRef +} + +input ApplicationDeploymentRiskRef { + id: ID + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment: ApplicationDeploymentRef +} + +""" +ApplicationEnvironment is a running instance of an application down to the level of a namespace or its non k8s equivalent. +""" +type ApplicationEnvironment { + """id is randomly assigned""" + id: String! + + """environment denotes whether it is dev, prod, staging, non-prod etc""" + environment(filter: EnvironmentFilter): Environment + application(filter: ApplicationFilter): Application! + deploymentTarget(filter: DeploymentTargetFilter): DeploymentTarget! + namespace: String! + + """ + toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + """ + toolsUsed: [String!] + deployments(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment!] 
+ riskStatus(filter: ApplicationRiskStatusFilter): ApplicationRiskStatus + metadata(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue!] + deploymentsAggregate(filter: ApplicationDeploymentFilter): ApplicationDeploymentAggregateResult + metadataAggregate(filter: KeyValueFilter): KeyValueAggregateResult +} + +type ApplicationEnvironmentAggregateResult { + count: Int + idMin: String + idMax: String + namespaceMin: String + namespaceMax: String +} + +input ApplicationEnvironmentFilter { + id: StringHashFilter + namespace: StringExactFilter_StringRegExpFilter + has: [ApplicationEnvironmentHasFilter] + and: [ApplicationEnvironmentFilter] + or: [ApplicationEnvironmentFilter] + not: ApplicationEnvironmentFilter +} + +enum ApplicationEnvironmentHasFilter { + id + environment + application + deploymentTarget + namespace + toolsUsed + deployments + riskStatus + metadata +} + +input ApplicationEnvironmentOrder { + asc: ApplicationEnvironmentOrderable + desc: ApplicationEnvironmentOrderable + then: ApplicationEnvironmentOrder +} + +enum ApplicationEnvironmentOrderable { + id + namespace +} + +input ApplicationEnvironmentPatch { + environment: EnvironmentRef + application: ApplicationRef + deploymentTarget: DeploymentTargetRef + namespace: String + + """ + toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + """ + toolsUsed: [String!] + deployments: [ApplicationDeploymentRef!] + riskStatus: ApplicationRiskStatusRef + metadata: [KeyValueRef!] +} + +input ApplicationEnvironmentRef { + """id is randomly assigned""" + id: String + environment: EnvironmentRef + application: ApplicationRef + deploymentTarget: DeploymentTargetRef + namespace: String + + """ + toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + """ + toolsUsed: [String!] + deployments: [ApplicationDeploymentRef!] 
+ riskStatus: ApplicationRiskStatusRef + metadata: [KeyValueRef!] +} + +input ApplicationFilter { + id: StringHashFilter + name: StringExactFilter_StringRegExpFilter + has: [ApplicationHasFilter] + and: [ApplicationFilter] + or: [ApplicationFilter] + not: ApplicationFilter +} + +enum ApplicationHasFilter { + id + name + roles + environments + team + policies + policyEnforcements + metadata +} + +input ApplicationOrder { + asc: ApplicationOrderable + desc: ApplicationOrderable + then: ApplicationOrder +} + +enum ApplicationOrderable { + id + name +} + +input ApplicationPatch { + name: String + roles: [RoleRef!] + environments: [ApplicationEnvironmentRef!] + team: TeamRef + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + metadata: [KeyValueRef!] +} + +input ApplicationRef { + """id is randomly assigned""" + id: String + name: String + roles: [RoleRef!] + environments: [ApplicationEnvironmentRef!] + team: TeamRef + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + metadata: [KeyValueRef!] +} + +""" +ApplicationRiskStatus tells us about the risk status and alerts for different stages for an application environment. +""" +type ApplicationRiskStatus { + id: ID! + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime! + updatedAt: DateTime! + applicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironment! 
+} + +type ApplicationRiskStatusAggregateResult { + count: Int + sourceCodeAlertsMin: Int + sourceCodeAlertsMax: Int + sourceCodeAlertsSum: Int + sourceCodeAlertsAvg: Float + buildAlertsMin: Int + buildAlertsMax: Int + buildAlertsSum: Int + buildAlertsAvg: Float + artifactAlertsMin: Int + artifactAlertsMax: Int + artifactAlertsSum: Int + artifactAlertsAvg: Float + deploymentAlertsMin: Int + deploymentAlertsMax: Int + deploymentAlertsSum: Int + deploymentAlertsAvg: Float + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input ApplicationRiskStatusFilter { + id: [ID!] + riskStatus: RiskStatus_exact_StringRegExpFilter + has: [ApplicationRiskStatusHasFilter] + and: [ApplicationRiskStatusFilter] + or: [ApplicationRiskStatusFilter] + not: ApplicationRiskStatusFilter +} + +enum ApplicationRiskStatusHasFilter { + riskStatus + sourceCodeAlerts + buildAlerts + artifactAlerts + deploymentAlerts + createdAt + updatedAt + applicationEnvironment +} + +input ApplicationRiskStatusOrder { + asc: ApplicationRiskStatusOrderable + desc: ApplicationRiskStatusOrderable + then: ApplicationRiskStatusOrder +} + +enum ApplicationRiskStatusOrderable { + sourceCodeAlerts + buildAlerts + artifactAlerts + deploymentAlerts + createdAt + updatedAt +} + +input ApplicationRiskStatusPatch { + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime + updatedAt: DateTime + applicationEnvironment: ApplicationEnvironmentRef +} + +input ApplicationRiskStatusRef { + id: ID + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime + updatedAt: DateTime + applicationEnvironment: ApplicationEnvironmentRef +} + +type Artifact { + id: String! + artifactType: String! + artifactName: String! + artifactTag: String! + artifactSha: String! 
+ scanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData!] + artifactDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment!] + buildDetails(filter: BuildToolFilter): BuildTool + scanDataAggregate(filter: ArtifactScanDataFilter): ArtifactScanDataAggregateResult + artifactDeploymentAggregate(filter: ApplicationDeploymentFilter): ApplicationDeploymentAggregateResult +} + +type ArtifactAggregateResult { + count: Int + idMin: String + idMax: String + artifactTypeMin: String + artifactTypeMax: String + artifactNameMin: String + artifactNameMax: String + artifactTagMin: String + artifactTagMax: String + artifactShaMin: String + artifactShaMax: String +} + +input ArtifactFilter { + id: StringHashFilter + artifactType: StringExactFilter + artifactName: StringExactFilter_StringRegExpFilter + artifactTag: StringExactFilter_StringRegExpFilter + artifactSha: StringExactFilter + has: [ArtifactHasFilter] + and: [ArtifactFilter] + or: [ArtifactFilter] + not: ArtifactFilter +} + +enum ArtifactHasFilter { + id + artifactType + artifactName + artifactTag + artifactSha + scanData + artifactDeployment + buildDetails +} + +input ArtifactOrder { + asc: ArtifactOrderable + desc: ArtifactOrderable + then: ArtifactOrder +} + +enum ArtifactOrderable { + id + artifactType + artifactName + artifactTag + artifactSha +} + +input ArtifactPatch { + artifactType: String + artifactName: String + artifactTag: String + artifactSha: String + scanData: [ArtifactScanDataRef!] + artifactDeployment: [ApplicationDeploymentRef!] + buildDetails: BuildToolRef +} + +input ArtifactRef { + id: String + artifactType: String + artifactName: String + artifactTag: String + artifactSha: String + scanData: [ArtifactScanDataRef!] + artifactDeployment: [ApplicationDeploymentRef!] + buildDetails: BuildToolRef +} + +type ArtifactScanData { + id: String! + artifactSha: String! 
+ tool: String! + artifactDetails(filter: ArtifactFilter): Artifact + lastScannedAt: DateTime + createdAt: DateTime + vulnTrackingId: String + components(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component!] + vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int + sbomUrl: String + artifactLicenseScanUrl: String + artifactSecretScanUrl: String + sourceLicenseScanUrl: String + sourceSecretScanUrl: String + sourceScorecardScanUrl: String + sourceSemgrepHighSeverityScanUrl: String + sourceSemgrepMediumSeverityScanUrl: String + sourceSemgrepLowSeverityScanUrl: String + sourceSnykScanUrl: String + virusTotalUrlScan: String + riskStatus: RiskStatus + artifactRunHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory!] + componentsAggregate(filter: ComponentFilter): ComponentAggregateResult + artifactRunHistoryAggregate(filter: RunHistoryFilter): RunHistoryAggregateResult +} + +type ArtifactScanDataAggregateResult { + count: Int + idMin: String + idMax: String + artifactShaMin: String + artifactShaMax: String + toolMin: String + toolMax: String + lastScannedAtMin: DateTime + lastScannedAtMax: DateTime + createdAtMin: DateTime + createdAtMax: DateTime + vulnTrackingIdMin: String + vulnTrackingIdMax: String + vulnCriticalCountMin: Int + vulnCriticalCountMax: Int + vulnCriticalCountSum: Int + vulnCriticalCountAvg: Float + vulnHighCountMin: Int + vulnHighCountMax: Int + vulnHighCountSum: Int + vulnHighCountAvg: Float + vulnMediumCountMin: Int + vulnMediumCountMax: Int + vulnMediumCountSum: Int + vulnMediumCountAvg: Float + vulnLowCountMin: Int + vulnLowCountMax: Int + vulnLowCountSum: Int + vulnLowCountAvg: Float + vulnInfoCountMin: Int + vulnInfoCountMax: Int + vulnInfoCountSum: Int + vulnInfoCountAvg: Float + vulnUnknownCountMin: Int + vulnUnknownCountMax: Int + vulnUnknownCountSum: 
Int + vulnUnknownCountAvg: Float + vulnNoneCountMin: Int + vulnNoneCountMax: Int + vulnNoneCountSum: Int + vulnNoneCountAvg: Float + vulnTotalCountMin: Int + vulnTotalCountMax: Int + vulnTotalCountSum: Int + vulnTotalCountAvg: Float + sbomUrlMin: String + sbomUrlMax: String + artifactLicenseScanUrlMin: String + artifactLicenseScanUrlMax: String + artifactSecretScanUrlMin: String + artifactSecretScanUrlMax: String + sourceLicenseScanUrlMin: String + sourceLicenseScanUrlMax: String + sourceSecretScanUrlMin: String + sourceSecretScanUrlMax: String + sourceScorecardScanUrlMin: String + sourceScorecardScanUrlMax: String + sourceSemgrepHighSeverityScanUrlMin: String + sourceSemgrepHighSeverityScanUrlMax: String + sourceSemgrepMediumSeverityScanUrlMin: String + sourceSemgrepMediumSeverityScanUrlMax: String + sourceSemgrepLowSeverityScanUrlMin: String + sourceSemgrepLowSeverityScanUrlMax: String + sourceSnykScanUrlMin: String + sourceSnykScanUrlMax: String + virusTotalUrlScanMin: String + virusTotalUrlScanMax: String +} + +input ArtifactScanDataFilter { + id: StringHashFilter + artifactSha: StringExactFilter + tool: StringExactFilter + vulnCriticalCount: IntFilter + vulnHighCount: IntFilter + vulnMediumCount: IntFilter + vulnLowCount: IntFilter + vulnInfoCount: IntFilter + vulnUnknownCount: IntFilter + vulnNoneCount: IntFilter + vulnTotalCount: IntFilter + riskStatus: RiskStatus_exact_StringRegExpFilter + has: [ArtifactScanDataHasFilter] + and: [ArtifactScanDataFilter] + or: [ArtifactScanDataFilter] + not: ArtifactScanDataFilter +} + +enum ArtifactScanDataHasFilter { + id + artifactSha + tool + artifactDetails + lastScannedAt + createdAt + vulnTrackingId + components + vulnCriticalCount + vulnHighCount + vulnMediumCount + vulnLowCount + vulnInfoCount + vulnUnknownCount + vulnNoneCount + vulnTotalCount + sbomUrl + artifactLicenseScanUrl + artifactSecretScanUrl + sourceLicenseScanUrl + sourceSecretScanUrl + sourceScorecardScanUrl + sourceSemgrepHighSeverityScanUrl + 
sourceSemgrepMediumSeverityScanUrl + sourceSemgrepLowSeverityScanUrl + sourceSnykScanUrl + virusTotalUrlScan + riskStatus + artifactRunHistory +} + +input ArtifactScanDataOrder { + asc: ArtifactScanDataOrderable + desc: ArtifactScanDataOrderable + then: ArtifactScanDataOrder +} + +enum ArtifactScanDataOrderable { + id + artifactSha + tool + lastScannedAt + createdAt + vulnTrackingId + vulnCriticalCount + vulnHighCount + vulnMediumCount + vulnLowCount + vulnInfoCount + vulnUnknownCount + vulnNoneCount + vulnTotalCount + sbomUrl + artifactLicenseScanUrl + artifactSecretScanUrl + sourceLicenseScanUrl + sourceSecretScanUrl + sourceScorecardScanUrl + sourceSemgrepHighSeverityScanUrl + sourceSemgrepMediumSeverityScanUrl + sourceSemgrepLowSeverityScanUrl + sourceSnykScanUrl + virusTotalUrlScan +} + +input ArtifactScanDataPatch { + artifactSha: String + tool: String + artifactDetails: ArtifactRef + lastScannedAt: DateTime + createdAt: DateTime + vulnTrackingId: String + components: [ComponentRef!] + vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int + sbomUrl: String + artifactLicenseScanUrl: String + artifactSecretScanUrl: String + sourceLicenseScanUrl: String + sourceSecretScanUrl: String + sourceScorecardScanUrl: String + sourceSemgrepHighSeverityScanUrl: String + sourceSemgrepMediumSeverityScanUrl: String + sourceSemgrepLowSeverityScanUrl: String + sourceSnykScanUrl: String + virusTotalUrlScan: String + riskStatus: RiskStatus + artifactRunHistory: [RunHistoryRef!] +} + +input ArtifactScanDataRef { + id: String + artifactSha: String + tool: String + artifactDetails: ArtifactRef + lastScannedAt: DateTime + createdAt: DateTime + vulnTrackingId: String + components: [ComponentRef!] 
+ vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int + sbomUrl: String + artifactLicenseScanUrl: String + artifactSecretScanUrl: String + sourceLicenseScanUrl: String + sourceSecretScanUrl: String + sourceScorecardScanUrl: String + sourceSemgrepHighSeverityScanUrl: String + sourceSemgrepMediumSeverityScanUrl: String + sourceSemgrepLowSeverityScanUrl: String + sourceSnykScanUrl: String + virusTotalUrlScan: String + riskStatus: RiskStatus + artifactRunHistory: [RunHistoryRef!] +} + +input AuthRule { + and: [AuthRule] + or: [AuthRule] + not: AuthRule + rule: String +} + +"""BuildTool contains data from build tool events.""" +type BuildTool { + """id is randomly assigned""" + id: String! + + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String! + + """tool is jenkins etc""" + tool: String! + + """buildName is the name of the job/pipeline/action""" + buildName: String! + buildUrl: String! + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String! + + """artifactTag would be the tag of the artifact""" + artifactTag: String! + + """digest is the sha of the artifact""" + digest: String! 
+ + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + + """artifactNode links a BuildTool node to an artifact""" + artifactNode(filter: ArtifactFilter): Artifact + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + + """sourceCodeTool links a BuildTool node to the source details""" + sourceCodeTool(filter: SourceCodeToolFilter): SourceCodeTool + + """commitMetaData links a BuildTool node to the git commit based details""" + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData!] + createdAt: DateTime! + commitMetaDataAggregate(filter: CommitMetaDataFilter): CommitMetaDataAggregateResult +} + +type BuildToolAggregateResult { + count: Int + idMin: String + idMax: String + buildIdMin: String + buildIdMax: String + toolMin: String + toolMax: String + buildNameMin: String + buildNameMax: String + buildUrlMin: String + buildUrlMax: String + artifactTypeMin: String + artifactTypeMax: String + artifactMin: String + artifactMax: String + artifactTagMin: String + artifactTagMax: String + digestMin: String + digestMax: String + buildDigestMin: String + buildDigestMax: String + buildTimeMin: DateTime + buildTimeMax: DateTime + buildUserMin: String + buildUserMax: String + createdAtMin: DateTime + createdAtMax: DateTime +} + +input BuildToolFilter { + id: StringHashFilter + buildId: StringExactFilter_StringRegExpFilter + tool: StringExactFilter + buildName: StringExactFilter_StringRegExpFilter + buildUrl: StringExactFilter + artifactType: StringExactFilter + artifact: StringExactFilter + artifactTag: StringExactFilter + digest: StringExactFilter + buildDigest: StringExactFilter + has: [BuildToolHasFilter] + and: [BuildToolFilter] + or: [BuildToolFilter] + not: BuildToolFilter +} + +enum BuildToolHasFilter { + id + buildId + tool + buildName + buildUrl + 
artifactType + artifact + artifactTag + digest + buildDigest + artifactNode + buildTime + buildUser + sourceCodeTool + commitMetaData + createdAt +} + +input BuildToolOrder { + asc: BuildToolOrderable + desc: BuildToolOrderable + then: BuildToolOrder +} + +enum BuildToolOrderable { + id + buildId + tool + buildName + buildUrl + artifactType + artifact + artifactTag + digest + buildDigest + buildTime + buildUser + createdAt +} + +input BuildToolPatch { + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String + + """tool is jenkins etc""" + tool: String + + """buildName is the name of the job/pipeline/action""" + buildName: String + buildUrl: String + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String + + """artifactTag would be the tag of the artifact""" + artifactTag: String + + """digest is the sha of the artifact""" + digest: String + + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + artifactNode: ArtifactRef + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + sourceCodeTool: SourceCodeToolRef + commitMetaData: [CommitMetaDataRef!] 
+ createdAt: DateTime +} + +input BuildToolRef { + """id is randomly assigned""" + id: String + + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String + + """tool is jenkins etc""" + tool: String + + """buildName is the name of the job/pipeline/action""" + buildName: String + buildUrl: String + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String + + """artifactTag would be the tag of the artifact""" + artifactTag: String + + """digest is the sha of the artifact""" + digest: String + + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + artifactNode: ArtifactRef + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + sourceCodeTool: SourceCodeToolRef + commitMetaData: [CommitMetaDataRef!] + createdAt: DateTime +} + +""" +CommitMetaData contains the git commit related details of the source repository . +""" +type CommitMetaData { + """id is randomly assigned""" + id: ID! + + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool(filter: BuildToolFilter): BuildTool! +} + +type CommitMetaDataAggregateResult { + count: Int + commitMin: String + commitMax: String + repositoryMin: String + repositoryMax: String + noOfReviewersConfMin: Int + noOfReviewersConfMax: Int + noOfReviewersConfSum: Int + noOfReviewersConfAvg: Float +} + +input CommitMetaDataFilter { + id: [ID!] 
+ has: [CommitMetaDataHasFilter] + and: [CommitMetaDataFilter] + or: [CommitMetaDataFilter] + not: CommitMetaDataFilter +} + +enum CommitMetaDataHasFilter { + commit + repository + commitSign + noOfReviewersConf + reviewerList + approverList + buildTool +} + +input CommitMetaDataOrder { + asc: CommitMetaDataOrderable + desc: CommitMetaDataOrderable + then: CommitMetaDataOrder +} + +enum CommitMetaDataOrderable { + commit + repository + noOfReviewersConf +} + +input CommitMetaDataPatch { + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool: BuildToolRef +} + +input CommitMetaDataRef { + """id is randomly assigned""" + id: ID + + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool: BuildToolRef +} + +type Component { + id: String! + type: String! + name: String! + version: String! + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability!] + artifacts(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData!] 
+ vulnerabilitiesAggregate(filter: VulnerabilityFilter): VulnerabilityAggregateResult + artifactsAggregate(filter: ArtifactScanDataFilter): ArtifactScanDataAggregateResult +} + +type ComponentAggregateResult { + count: Int + idMin: String + idMax: String + typeMin: String + typeMax: String + nameMin: String + nameMax: String + versionMin: String + versionMax: String + purlMin: String + purlMax: String + cpeMin: String + cpeMax: String + scannedAtMin: DateTime + scannedAtMax: DateTime +} + +input ComponentFilter { + id: StringHashFilter + name: StringExactFilter_StringRegExpFilter + version: StringExactFilter_StringRegExpFilter + purl: StringExactFilter + cpe: StringExactFilter + has: [ComponentHasFilter] + and: [ComponentFilter] + or: [ComponentFilter] + not: ComponentFilter +} + +enum ComponentHasFilter { + id + type + name + version + licenses + purl + cpe + scannedAt + vulnerabilities + artifacts +} + +input ComponentOrder { + asc: ComponentOrderable + desc: ComponentOrderable + then: ComponentOrder +} + +enum ComponentOrderable { + id + type + name + version + purl + cpe + scannedAt +} + +input ComponentPatch { + type: String + name: String + version: String + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities: [VulnerabilityRef!] + artifacts: [ArtifactScanDataRef!] +} + +input ComponentRef { + id: String + type: String + name: String + version: String + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities: [VulnerabilityRef!] + artifacts: [ArtifactScanDataRef!] +} + +input ContainsFilter { + point: PointRef + polygon: PolygonRef +} + +type Credentials { + id: ID! + data: String! + integrator(filter: IntegratorFilter): Integrator! +} + +type CredentialsAggregateResult { + count: Int + dataMin: String + dataMax: String +} + +input CredentialsFilter { + id: [ID!] 
+ has: [CredentialsHasFilter] + and: [CredentialsFilter] + or: [CredentialsFilter] + not: CredentialsFilter +} + +enum CredentialsHasFilter { + data + integrator +} + +input CredentialsOrder { + asc: CredentialsOrderable + desc: CredentialsOrderable + then: CredentialsOrder +} + +enum CredentialsOrderable { + data +} + +input CredentialsPatch { + data: String + integrator: IntegratorRef +} + +input CredentialsRef { + id: ID + data: String + integrator: IntegratorRef +} + +input CustomHTTP { + url: String! + method: HTTPMethod! + body: String + graphql: String + mode: Mode + forwardHeaders: [String!] + secretHeaders: [String!] + introspectionHeaders: [String!] + skipIntrospection: Boolean +} + +type CWE { + id: String! + name: String! + description: String +} + +type CWEAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + descriptionMin: String + descriptionMax: String +} + +input CWEFilter { + id: StringHashFilter + has: [CWEHasFilter] + and: [CWEFilter] + or: [CWEFilter] + not: CWEFilter +} + +enum CWEHasFilter { + id + name + description +} + +input CWEOrder { + asc: CWEOrderable + desc: CWEOrderable + then: CWEOrder +} + +enum CWEOrderable { + id + name + description +} + +input CWEPatch { + name: String + description: String +} + +input CWERef { + id: String + name: String + description: String +} + +""" +The DateTime scalar type represents date and time as a string in RFC3339 format. +For example: "1985-04-12T23:20:50.52Z" represents 20 mins 50.52 secs after the 23rd hour of Apr 12th 1985 in UTC. +""" +scalar DateTime + +input DateTimeFilter { + eq: DateTime + in: [DateTime] + le: DateTime + lt: DateTime + ge: DateTime + gt: DateTime + between: DateTimeRange +} + +input DateTimeRange { + min: DateTime! + max: DateTime! 
+} + +type DeleteApplicationDeploymentPayload { + applicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + msg: String + numUids: Int +} + +type DeleteApplicationDeploymentRiskPayload { + applicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + msg: String + numUids: Int +} + +type DeleteApplicationEnvironmentPayload { + applicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + msg: String + numUids: Int +} + +type DeleteApplicationPayload { + application(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + msg: String + numUids: Int +} + +type DeleteApplicationRiskStatusPayload { + applicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + msg: String + numUids: Int +} + +type DeleteArtifactPayload { + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + msg: String + numUids: Int +} + +type DeleteArtifactScanDataPayload { + artifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + msg: String + numUids: Int +} + +type DeleteBuildToolPayload { + buildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + msg: String + numUids: Int +} + +type DeleteCommitMetaDataPayload { + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + msg: String + numUids: Int +} + +type DeleteComponentPayload { + component(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + msg: String + numUids: Int +} + +type DeleteCredentialsPayload { + 
credentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + msg: String + numUids: Int +} + +type DeleteCWEPayload { + cWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + msg: String + numUids: Int +} + +type DeleteDeploymentTargetPayload { + deploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + msg: String + numUids: Int +} + +type DeleteEnvironmentPayload { + environment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + msg: String + numUids: Int +} + +type DeleteFeatureModePayload { + featureMode(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode] + msg: String + numUids: Int +} + +type DeleteIntegratorPayload { + integrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + msg: String + numUids: Int +} + +type DeleteKeyValuePayload { + keyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + msg: String + numUids: Int +} + +type DeleteOrganizationPayload { + organization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + msg: String + numUids: Int +} + +type DeletePolicyDefinitionPayload { + policyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + msg: String + numUids: Int +} + +type DeletePolicyEnforcementPayload { + policyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + msg: String + numUids: Int +} + +type DeleteRBACPayload { + rBAC(filter: RBACFilter, first: Int, offset: Int): [RBAC] + msg: String + numUids: Int +} + +type DeleteRolePayload { + role(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + msg: String + numUids: Int +} + +type 
DeleteRunHistoryPayload { + runHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + msg: String + numUids: Int +} + +type DeleteSchemaVersionPayload { + schemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + msg: String + numUids: Int +} + +type DeleteSecurityIssuePayload { + securityIssue(filter: SecurityIssueFilter, order: SecurityIssueOrder, first: Int, offset: Int): [SecurityIssue] + msg: String + numUids: Int +} + +type DeleteSourceCodeToolPayload { + sourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + msg: String + numUids: Int +} + +type DeleteTagPayload { + tag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + msg: String + numUids: Int +} + +type DeleteTeamPayload { + team(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + msg: String + numUids: Int +} + +type DeleteToolsUsedPayload { + toolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + msg: String + numUids: Int +} + +type DeleteVulnerabilityPayload { + vulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + msg: String + numUids: Int +} + +"""DeploymentStage is an enum denoting the stage of the deployment. 
.""" +enum DeploymentStage { + """deployment is discovered from the events""" + discovered + + """scanning is under process""" + scanning + + """ + deployment is known to have passed the deployment firewall and the deployment(ie the artifact) is live + """ + current + + """ + deployment becomes a past deployment because another fresh deployment has happened + """ + previous + + """deployment is blocked by the firewall""" + blocked +} + +input DeploymentStage_exact { + eq: DeploymentStage + in: [DeploymentStage] + le: DeploymentStage + lt: DeploymentStage + ge: DeploymentStage + gt: DeploymentStage + between: DeploymentStage +} + +""" +DeploymentTarget describes a single place that things can be deployed into, +such as an AWS account or a Kubernetes cluster. +""" +type DeploymentTarget { + """id is randomly assigned""" + id: String! + name: String! + + """this would be the ip/server address of the target environment""" + ip: String! + account: String + + """this would be something like aws, gcp etc""" + targetType: String + + """this would be something like us-east-1 etc""" + region: String + kubescapeServiceConnected: String + isFirewall: Boolean + organization(filter: OrganizationFilter): Organization! + defaultEnvironment(filter: EnvironmentFilter): Environment! 
+} + +type DeploymentTargetAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + ipMin: String + ipMax: String + accountMin: String + accountMax: String + targetTypeMin: String + targetTypeMax: String + regionMin: String + regionMax: String + kubescapeServiceConnectedMin: String + kubescapeServiceConnectedMax: String +} + +input DeploymentTargetFilter { + id: StringHashFilter + name: StringExactFilter_StringRegExpFilter + ip: StringExactFilter + has: [DeploymentTargetHasFilter] + and: [DeploymentTargetFilter] + or: [DeploymentTargetFilter] + not: DeploymentTargetFilter +} + +enum DeploymentTargetHasFilter { + id + name + ip + account + targetType + region + kubescapeServiceConnected + isFirewall + organization + defaultEnvironment +} + +input DeploymentTargetOrder { + asc: DeploymentTargetOrderable + desc: DeploymentTargetOrderable + then: DeploymentTargetOrder +} + +enum DeploymentTargetOrderable { + id + name + ip + account + targetType + region + kubescapeServiceConnected +} + +input DeploymentTargetPatch { + name: String + + """this would be the ip/server address of the target environment""" + ip: String + account: String + + """this would be something like aws, gcp etc""" + targetType: String + + """this would be something like us-east-1 etc""" + region: String + kubescapeServiceConnected: String + isFirewall: Boolean + organization: OrganizationRef + defaultEnvironment: EnvironmentRef +} + +input DeploymentTargetRef { + """id is randomly assigned""" + id: String + name: String + + """this would be the ip/server address of the target environment""" + ip: String + account: String + + """this would be something like aws, gcp etc""" + targetType: String + + """this would be something like us-east-1 etc""" + region: String + kubescapeServiceConnected: String + isFirewall: Boolean + organization: OrganizationRef + defaultEnvironment: EnvironmentRef +} + +enum DgraphIndex { + int + int64 + float + bool + hash + exact + 
term + fulltext + trigram + regexp + year + month + day + hour + geo +} + +"""Environment can be things like dev, prod, staging etc.""" +type Environment { + id: String! + organization(filter: OrganizationFilter): Organization! + purpose: String! +} + +type EnvironmentAggregateResult { + count: Int + idMin: String + idMax: String + purposeMin: String + purposeMax: String +} + +input EnvironmentFilter { + id: StringHashFilter + purpose: StringExactFilter + has: [EnvironmentHasFilter] + and: [EnvironmentFilter] + or: [EnvironmentFilter] + not: EnvironmentFilter +} + +enum EnvironmentHasFilter { + id + organization + purpose +} + +input EnvironmentOrder { + asc: EnvironmentOrderable + desc: EnvironmentOrderable + then: EnvironmentOrder +} + +enum EnvironmentOrderable { + id + purpose +} + +input EnvironmentPatch { + organization: OrganizationRef + purpose: String +} + +input EnvironmentRef { + id: String + organization: OrganizationRef + purpose: String +} + +type FeatureMode { + id: String! + organization(filter: OrganizationFilter): Organization! + scan: String! + type: String! + enabled: Boolean! + category: String! + createdAt: DateTime! + updatedAt: DateTime! 
+} + +type FeatureModeAggregateResult { + count: Int + idMin: String + idMax: String + scanMin: String + scanMax: String + typeMin: String + typeMax: String + categoryMin: String + categoryMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input FeatureModeFilter { + id: StringHashFilter + scan: StringExactFilter + type: StringExactFilter + category: StringExactFilter + has: [FeatureModeHasFilter] + and: [FeatureModeFilter] + or: [FeatureModeFilter] + not: FeatureModeFilter +} + +enum FeatureModeHasFilter { + id + organization + scan + type + enabled + category + createdAt + updatedAt +} + +input FeatureModeOrder { + asc: FeatureModeOrderable + desc: FeatureModeOrderable + then: FeatureModeOrder +} + +enum FeatureModeOrderable { + id + scan + type + category + createdAt + updatedAt +} + +input FeatureModePatch { + organization: OrganizationRef + scan: String + type: String + enabled: Boolean + category: String + createdAt: DateTime + updatedAt: DateTime +} + +input FeatureModeRef { + id: String + organization: OrganizationRef + scan: String + type: String + enabled: Boolean + category: String + createdAt: DateTime + updatedAt: DateTime +} + +input FloatFilter { + eq: Float + in: [Float] + le: Float + lt: Float + ge: Float + gt: Float + between: FloatRange +} + +input FloatRange { + min: Float! + max: Float! +} + +input GenerateMutationParams { + add: Boolean + update: Boolean + delete: Boolean +} + +input GenerateQueryParams { + get: Boolean + query: Boolean + password: Boolean + aggregate: Boolean +} + +enum HTTPMethod { + GET + POST + PUT + PATCH + DELETE +} + +""" +The Int64 scalar type represents a signed 64‐bit numeric non‐fractional value. +Int64 can represent values in range [-(2^63),(2^63 - 1)]. +""" +scalar Int64 + +input Int64Filter { + eq: Int64 + in: [Int64] + le: Int64 + lt: Int64 + ge: Int64 + gt: Int64 + between: Int64Range +} + +input Int64Range { + min: Int64! + max: Int64! 
+} + +type Integrator { + id: String! + organization(filter: OrganizationFilter): Organization! + name: String! + type: String! + category: String! + credentials(filter: CredentialsFilter): Credentials! + createdAt: DateTime! + updatedAt: DateTime! +} + +type IntegratorAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + typeMin: String + typeMax: String + categoryMin: String + categoryMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input IntegratorFilter { + id: StringHashFilter + name: StringExactFilter + type: StringExactFilter + category: StringExactFilter + has: [IntegratorHasFilter] + and: [IntegratorFilter] + or: [IntegratorFilter] + not: IntegratorFilter +} + +enum IntegratorHasFilter { + id + organization + name + type + category + credentials + createdAt + updatedAt +} + +input IntegratorOrder { + asc: IntegratorOrderable + desc: IntegratorOrderable + then: IntegratorOrder +} + +enum IntegratorOrderable { + id + name + type + category + createdAt + updatedAt +} + +input IntegratorPatch { + organization: OrganizationRef + name: String + type: String + category: String + credentials: CredentialsRef + createdAt: DateTime + updatedAt: DateTime +} + +input IntegratorRef { + id: String + organization: OrganizationRef + name: String + type: String + category: String + credentials: CredentialsRef + createdAt: DateTime + updatedAt: DateTime +} + +input IntersectsFilter { + polygon: PolygonRef + multiPolygon: MultiPolygonRef +} + +input IntFilter { + eq: Int + in: [Int] + le: Int + lt: Int + ge: Int + gt: Int + between: IntRange +} + +input IntRange { + min: Int! + max: Int! +} + +""" +KeyValue is a generic key/value pair, used as an attribute list or similar. +""" +type KeyValue { + id: String! + name: String! + value: String! 
+} + +type KeyValueAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + valueMin: String + valueMax: String +} + +input KeyValueFilter { + id: StringHashFilter + has: [KeyValueHasFilter] + and: [KeyValueFilter] + or: [KeyValueFilter] + not: KeyValueFilter +} + +enum KeyValueHasFilter { + id + name + value +} + +input KeyValueOrder { + asc: KeyValueOrderable + desc: KeyValueOrderable + then: KeyValueOrder +} + +enum KeyValueOrderable { + id + name + value +} + +input KeyValuePatch { + name: String + value: String +} + +input KeyValueRef { + id: String + name: String + value: String +} + +enum Mode { + BATCH + SINGLE +} + +type MultiPolygon { + polygons: [Polygon!]! +} + +input MultiPolygonRef { + polygons: [PolygonRef!]! +} + +type Mutation { + addSchemaVersion(input: [AddSchemaVersionInput!]!): AddSchemaVersionPayload + updateSchemaVersion(input: UpdateSchemaVersionInput!): UpdateSchemaVersionPayload + deleteSchemaVersion(filter: SchemaVersionFilter!): DeleteSchemaVersionPayload + updateRBAC(input: UpdateRBACInput!): UpdateRBACPayload + deleteRBAC(filter: RBACFilter!): DeleteRBACPayload + addRole(input: [AddRoleInput!]!, upsert: Boolean): AddRolePayload + updateRole(input: UpdateRoleInput!): UpdateRolePayload + deleteRole(filter: RoleFilter!): DeleteRolePayload + addKeyValue(input: [AddKeyValueInput!]!, upsert: Boolean): AddKeyValuePayload + updateKeyValue(input: UpdateKeyValueInput!): UpdateKeyValuePayload + deleteKeyValue(filter: KeyValueFilter!): DeleteKeyValuePayload + addOrganization(input: [AddOrganizationInput!]!, upsert: Boolean): AddOrganizationPayload + updateOrganization(input: UpdateOrganizationInput!): UpdateOrganizationPayload + deleteOrganization(filter: OrganizationFilter!): DeleteOrganizationPayload + addEnvironment(input: [AddEnvironmentInput!]!, upsert: Boolean): AddEnvironmentPayload + updateEnvironment(input: UpdateEnvironmentInput!): UpdateEnvironmentPayload + deleteEnvironment(filter: 
EnvironmentFilter!): DeleteEnvironmentPayload + addDeploymentTarget(input: [AddDeploymentTargetInput!]!, upsert: Boolean): AddDeploymentTargetPayload + updateDeploymentTarget(input: UpdateDeploymentTargetInput!): UpdateDeploymentTargetPayload + deleteDeploymentTarget(filter: DeploymentTargetFilter!): DeleteDeploymentTargetPayload + addTeam(input: [AddTeamInput!]!, upsert: Boolean): AddTeamPayload + updateTeam(input: UpdateTeamInput!): UpdateTeamPayload + deleteTeam(filter: TeamFilter!): DeleteTeamPayload + addApplication(input: [AddApplicationInput!]!, upsert: Boolean): AddApplicationPayload + updateApplication(input: UpdateApplicationInput!): UpdateApplicationPayload + deleteApplication(filter: ApplicationFilter!): DeleteApplicationPayload + addApplicationEnvironment(input: [AddApplicationEnvironmentInput!]!, upsert: Boolean): AddApplicationEnvironmentPayload + updateApplicationEnvironment(input: UpdateApplicationEnvironmentInput!): UpdateApplicationEnvironmentPayload + deleteApplicationEnvironment(filter: ApplicationEnvironmentFilter!): DeleteApplicationEnvironmentPayload + addApplicationRiskStatus(input: [AddApplicationRiskStatusInput!]!): AddApplicationRiskStatusPayload + updateApplicationRiskStatus(input: UpdateApplicationRiskStatusInput!): UpdateApplicationRiskStatusPayload + deleteApplicationRiskStatus(filter: ApplicationRiskStatusFilter!): DeleteApplicationRiskStatusPayload + addApplicationDeployment(input: [AddApplicationDeploymentInput!]!, upsert: Boolean): AddApplicationDeploymentPayload + updateApplicationDeployment(input: UpdateApplicationDeploymentInput!): UpdateApplicationDeploymentPayload + deleteApplicationDeployment(filter: ApplicationDeploymentFilter!): DeleteApplicationDeploymentPayload + addToolsUsed(input: [AddToolsUsedInput!]!): AddToolsUsedPayload + updateToolsUsed(input: UpdateToolsUsedInput!): UpdateToolsUsedPayload + deleteToolsUsed(filter: ToolsUsedFilter!): DeleteToolsUsedPayload + addApplicationDeploymentRisk(input: 
[AddApplicationDeploymentRiskInput!]!): AddApplicationDeploymentRiskPayload + updateApplicationDeploymentRisk(input: UpdateApplicationDeploymentRiskInput!): UpdateApplicationDeploymentRiskPayload + deleteApplicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter!): DeleteApplicationDeploymentRiskPayload + addIntegrator(input: [AddIntegratorInput!]!, upsert: Boolean): AddIntegratorPayload + updateIntegrator(input: UpdateIntegratorInput!): UpdateIntegratorPayload + deleteIntegrator(filter: IntegratorFilter!): DeleteIntegratorPayload + addCredentials(input: [AddCredentialsInput!]!): AddCredentialsPayload + updateCredentials(input: UpdateCredentialsInput!): UpdateCredentialsPayload + deleteCredentials(filter: CredentialsFilter!): DeleteCredentialsPayload + addFeatureMode(input: [AddFeatureModeInput!]!, upsert: Boolean): AddFeatureModePayload + updateFeatureMode(input: UpdateFeatureModeInput!): UpdateFeatureModePayload + deleteFeatureMode(filter: FeatureModeFilter!): DeleteFeatureModePayload + addTag(input: [AddTagInput!]!, upsert: Boolean): AddTagPayload + updateTag(input: UpdateTagInput!): UpdateTagPayload + deleteTag(filter: TagFilter!): DeleteTagPayload + addPolicyDefinition(input: [AddPolicyDefinitionInput!]!, upsert: Boolean): AddPolicyDefinitionPayload + updatePolicyDefinition(input: UpdatePolicyDefinitionInput!): UpdatePolicyDefinitionPayload + deletePolicyDefinition(filter: PolicyDefinitionFilter!): DeletePolicyDefinitionPayload + addPolicyEnforcement(input: [AddPolicyEnforcementInput!]!): AddPolicyEnforcementPayload + updatePolicyEnforcement(input: UpdatePolicyEnforcementInput!): UpdatePolicyEnforcementPayload + deletePolicyEnforcement(filter: PolicyEnforcementFilter!): DeletePolicyEnforcementPayload + addRunHistory(input: [AddRunHistoryInput!]!): AddRunHistoryPayload + updateRunHistory(input: UpdateRunHistoryInput!): UpdateRunHistoryPayload + deleteRunHistory(filter: RunHistoryFilter!): DeleteRunHistoryPayload + addSecurityIssue(input: 
[AddSecurityIssueInput!]!): AddSecurityIssuePayload + updateSecurityIssue(input: UpdateSecurityIssueInput!): UpdateSecurityIssuePayload + deleteSecurityIssue(filter: SecurityIssueFilter!): DeleteSecurityIssuePayload + addBuildTool(input: [AddBuildToolInput!]!, upsert: Boolean): AddBuildToolPayload + updateBuildTool(input: UpdateBuildToolInput!): UpdateBuildToolPayload + deleteBuildTool(filter: BuildToolFilter!): DeleteBuildToolPayload + addSourceCodeTool(input: [AddSourceCodeToolInput!]!, upsert: Boolean): AddSourceCodeToolPayload + updateSourceCodeTool(input: UpdateSourceCodeToolInput!): UpdateSourceCodeToolPayload + deleteSourceCodeTool(filter: SourceCodeToolFilter!): DeleteSourceCodeToolPayload + addCommitMetaData(input: [AddCommitMetaDataInput!]!): AddCommitMetaDataPayload + updateCommitMetaData(input: UpdateCommitMetaDataInput!): UpdateCommitMetaDataPayload + deleteCommitMetaData(filter: CommitMetaDataFilter!): DeleteCommitMetaDataPayload + addArtifact(input: [AddArtifactInput!]!, upsert: Boolean): AddArtifactPayload + updateArtifact(input: UpdateArtifactInput!): UpdateArtifactPayload + deleteArtifact(filter: ArtifactFilter!): DeleteArtifactPayload + addArtifactScanData(input: [AddArtifactScanDataInput!]!, upsert: Boolean): AddArtifactScanDataPayload + updateArtifactScanData(input: UpdateArtifactScanDataInput!): UpdateArtifactScanDataPayload + deleteArtifactScanData(filter: ArtifactScanDataFilter!): DeleteArtifactScanDataPayload + addComponent(input: [AddComponentInput!]!, upsert: Boolean): AddComponentPayload + updateComponent(input: UpdateComponentInput!): UpdateComponentPayload + deleteComponent(filter: ComponentFilter!): DeleteComponentPayload + addVulnerability(input: [AddVulnerabilityInput!]!, upsert: Boolean): AddVulnerabilityPayload + updateVulnerability(input: UpdateVulnerabilityInput!): UpdateVulnerabilityPayload + deleteVulnerability(filter: VulnerabilityFilter!): DeleteVulnerabilityPayload + addCWE(input: [AddCWEInput!]!, upsert: Boolean): 
AddCWEPayload + updateCWE(input: UpdateCWEInput!): UpdateCWEPayload + deleteCWE(filter: CWEFilter!): DeleteCWEPayload +} + +input NearFilter { + distance: Float! + coordinate: PointRef! +} + +type Organization implements RBAC { + """id is randomly assigned""" + id: String! + name: String! + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + teams(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team!] + environments(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget!] + policies(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition!] + policyEnforcements(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] + integrators(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator!] + featureModes(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode!] 
+ rolesAggregate(filter: RoleFilter): RoleAggregateResult + teamsAggregate(filter: TeamFilter): TeamAggregateResult + environmentsAggregate(filter: DeploymentTargetFilter): DeploymentTargetAggregateResult + policiesAggregate(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + policyEnforcementsAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult + integratorsAggregate(filter: IntegratorFilter): IntegratorAggregateResult + featureModesAggregate(filter: FeatureModeFilter): FeatureModeAggregateResult +} + +type OrganizationAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String +} + +input OrganizationFilter { + id: StringHashFilter + name: StringExactFilter + has: [OrganizationHasFilter] + and: [OrganizationFilter] + or: [OrganizationFilter] + not: OrganizationFilter +} + +enum OrganizationHasFilter { + id + name + roles + teams + environments + policies + policyEnforcements + integrators + featureModes +} + +input OrganizationOrder { + asc: OrganizationOrderable + desc: OrganizationOrderable + then: OrganizationOrder +} + +enum OrganizationOrderable { + id + name +} + +input OrganizationPatch { + name: String + roles: [RoleRef!] + teams: [TeamRef!] + environments: [DeploymentTargetRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + integrators: [IntegratorRef!] + featureModes: [FeatureModeRef!] +} + +input OrganizationRef { + """id is randomly assigned""" + id: String + name: String + roles: [RoleRef!] + teams: [TeamRef!] + environments: [DeploymentTargetRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + integrators: [IntegratorRef!] + featureModes: [FeatureModeRef!] +} + +type Point { + longitude: Float! + latitude: Float! +} + +input PointGeoFilter { + near: NearFilter + within: WithinFilter +} + +type PointList { + points: [Point!]! +} + +input PointListRef { + points: [PointRef!]! 
+} + +input PointRef { + longitude: Float! + latitude: Float! +} + +type PolicyDefinition { + id: String! + ownerOrg(filter: OrganizationFilter): Organization! + ownerTeam(filter: TeamFilter): Team + ownerApplication(filter: ApplicationFilter): Application + createdAt: DateTime! + updatedAt: DateTime! + policyName: String! + category: String! + stage: String! + description: String! + scheduledPolicy: Boolean! + script: String! + variables: String + conditionName: String + suggestion: String +} + +type PolicyDefinitionAggregateResult { + count: Int + idMin: String + idMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime + policyNameMin: String + policyNameMax: String + categoryMin: String + categoryMax: String + stageMin: String + stageMax: String + descriptionMin: String + descriptionMax: String + scriptMin: String + scriptMax: String + variablesMin: String + variablesMax: String + conditionNameMin: String + conditionNameMax: String + suggestionMin: String + suggestionMax: String +} + +input PolicyDefinitionFilter { + id: StringHashFilter + policyName: StringExactFilter + category: StringExactFilter + stage: StringExactFilter + has: [PolicyDefinitionHasFilter] + and: [PolicyDefinitionFilter] + or: [PolicyDefinitionFilter] + not: PolicyDefinitionFilter +} + +enum PolicyDefinitionHasFilter { + id + ownerOrg + ownerTeam + ownerApplication + createdAt + updatedAt + policyName + category + stage + description + scheduledPolicy + script + variables + conditionName + suggestion +} + +input PolicyDefinitionOrder { + asc: PolicyDefinitionOrderable + desc: PolicyDefinitionOrderable + then: PolicyDefinitionOrder +} + +enum PolicyDefinitionOrderable { + id + createdAt + updatedAt + policyName + category + stage + description + script + variables + conditionName + suggestion +} + +input PolicyDefinitionPatch { + ownerOrg: OrganizationRef + ownerTeam: TeamRef + ownerApplication: ApplicationRef + createdAt: DateTime + 
updatedAt: DateTime + policyName: String + category: String + stage: String + description: String + scheduledPolicy: Boolean + script: String + variables: String + conditionName: String + suggestion: String +} + +input PolicyDefinitionRef { + id: String + ownerOrg: OrganizationRef + ownerTeam: TeamRef + ownerApplication: ApplicationRef + createdAt: DateTime + updatedAt: DateTime + policyName: String + category: String + stage: String + description: String + scheduledPolicy: Boolean + script: String + variables: String + conditionName: String + suggestion: String +} + +type PolicyEnforcement { + id: ID! + policy(filter: PolicyDefinitionFilter): PolicyDefinition! + enforcedOrg(filter: OrganizationFilter): Organization + enforcedTeam(filter: TeamFilter): Team + enforcedApplication(filter: ApplicationFilter): Application + status: Boolean! + forceApply: Boolean + severity: Severity! + datasourceTool: String! + action: String! + conditionValue: String + environments(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment!] + tags(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag!] + createdAt: DateTime! + updatedAt: DateTime! + environmentsAggregate(filter: EnvironmentFilter): EnvironmentAggregateResult + tagsAggregate(filter: TagFilter): TagAggregateResult +} + +type PolicyEnforcementAggregateResult { + count: Int + datasourceToolMin: String + datasourceToolMax: String + actionMin: String + actionMax: String + conditionValueMin: String + conditionValueMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input PolicyEnforcementFilter { + id: [ID!] 
+ status: Boolean + datasourceTool: StringExactFilter + action: StringExactFilter + has: [PolicyEnforcementHasFilter] + and: [PolicyEnforcementFilter] + or: [PolicyEnforcementFilter] + not: PolicyEnforcementFilter +} + +enum PolicyEnforcementHasFilter { + policy + enforcedOrg + enforcedTeam + enforcedApplication + status + forceApply + severity + datasourceTool + action + conditionValue + environments + tags + createdAt + updatedAt +} + +input PolicyEnforcementOrder { + asc: PolicyEnforcementOrderable + desc: PolicyEnforcementOrderable + then: PolicyEnforcementOrder +} + +enum PolicyEnforcementOrderable { + datasourceTool + action + conditionValue + createdAt + updatedAt +} + +input PolicyEnforcementPatch { + policy: PolicyDefinitionRef + enforcedOrg: OrganizationRef + enforcedTeam: TeamRef + enforcedApplication: ApplicationRef + status: Boolean + forceApply: Boolean + severity: Severity + datasourceTool: String + action: String + conditionValue: String + environments: [EnvironmentRef!] + tags: [TagRef!] + createdAt: DateTime + updatedAt: DateTime +} + +input PolicyEnforcementRef { + id: ID + policy: PolicyDefinitionRef + enforcedOrg: OrganizationRef + enforcedTeam: TeamRef + enforcedApplication: ApplicationRef + status: Boolean + forceApply: Boolean + severity: Severity + datasourceTool: String + action: String + conditionValue: String + environments: [EnvironmentRef!] + tags: [TagRef!] + createdAt: DateTime + updatedAt: DateTime +} + +type Polygon { + coordinates: [PointList!]! +} + +input PolygonGeoFilter { + near: NearFilter + within: WithinFilter + contains: ContainsFilter + intersects: IntersectsFilter +} + +input PolygonRef { + coordinates: [PointListRef!]! 
+} + +type Query { + querySchemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + aggregateSchemaVersion(filter: SchemaVersionFilter): SchemaVersionAggregateResult + queryRBAC(filter: RBACFilter, first: Int, offset: Int): [RBAC] + aggregateRBAC(filter: RBACFilter): RBACAggregateResult + getRole(id: String!): Role + queryRole(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + aggregateRole(filter: RoleFilter): RoleAggregateResult + getKeyValue(id: String!): KeyValue + queryKeyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + aggregateKeyValue(filter: KeyValueFilter): KeyValueAggregateResult + getOrganization(id: String!): Organization + queryOrganization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + aggregateOrganization(filter: OrganizationFilter): OrganizationAggregateResult + getEnvironment(id: String!): Environment + queryEnvironment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + aggregateEnvironment(filter: EnvironmentFilter): EnvironmentAggregateResult + getDeploymentTarget(id: String!): DeploymentTarget + queryDeploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + aggregateDeploymentTarget(filter: DeploymentTargetFilter): DeploymentTargetAggregateResult + getTeam(id: String!): Team + queryTeam(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + aggregateTeam(filter: TeamFilter): TeamAggregateResult + getApplication(id: String!): Application + queryApplication(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + aggregateApplication(filter: ApplicationFilter): ApplicationAggregateResult + getApplicationEnvironment(id: String!): ApplicationEnvironment + queryApplicationEnvironment(filter: 
ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + aggregateApplicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironmentAggregateResult + getApplicationRiskStatus(id: ID!): ApplicationRiskStatus + queryApplicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + aggregateApplicationRiskStatus(filter: ApplicationRiskStatusFilter): ApplicationRiskStatusAggregateResult + getApplicationDeployment(id: String!): ApplicationDeployment + queryApplicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + aggregateApplicationDeployment(filter: ApplicationDeploymentFilter): ApplicationDeploymentAggregateResult + getToolsUsed(id: ID!): ToolsUsed + queryToolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + aggregateToolsUsed(filter: ToolsUsedFilter): ToolsUsedAggregateResult + getApplicationDeploymentRisk(id: ID!): ApplicationDeploymentRisk + queryApplicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + aggregateApplicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter): ApplicationDeploymentRiskAggregateResult + getIntegrator(id: String!): Integrator + queryIntegrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + aggregateIntegrator(filter: IntegratorFilter): IntegratorAggregateResult + getCredentials(id: ID!): Credentials + queryCredentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + aggregateCredentials(filter: CredentialsFilter): CredentialsAggregateResult + getFeatureMode(id: String!): FeatureMode + queryFeatureMode(filter: FeatureModeFilter, order: FeatureModeOrder, 
first: Int, offset: Int): [FeatureMode] + aggregateFeatureMode(filter: FeatureModeFilter): FeatureModeAggregateResult + getTag(id: String!): Tag + queryTag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + aggregateTag(filter: TagFilter): TagAggregateResult + getPolicyDefinition(id: String!): PolicyDefinition + queryPolicyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + aggregatePolicyDefinition(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + getPolicyEnforcement(id: ID!): PolicyEnforcement + queryPolicyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + aggregatePolicyEnforcement(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult + getRunHistory(id: ID!): RunHistory + queryRunHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + aggregateRunHistory(filter: RunHistoryFilter): RunHistoryAggregateResult + getSecurityIssue(id: ID!): SecurityIssue + querySecurityIssue(filter: SecurityIssueFilter, order: SecurityIssueOrder, first: Int, offset: Int): [SecurityIssue] + aggregateSecurityIssue(filter: SecurityIssueFilter): SecurityIssueAggregateResult + getBuildTool(id: String!): BuildTool + queryBuildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + aggregateBuildTool(filter: BuildToolFilter): BuildToolAggregateResult + getSourceCodeTool(id: String!): SourceCodeTool + querySourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + aggregateSourceCodeTool(filter: SourceCodeToolFilter): SourceCodeToolAggregateResult + getCommitMetaData(id: ID!): CommitMetaData + queryCommitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + aggregateCommitMetaData(filter: CommitMetaDataFilter): 
CommitMetaDataAggregateResult + getArtifact(id: String!): Artifact + queryArtifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + aggregateArtifact(filter: ArtifactFilter): ArtifactAggregateResult + getArtifactScanData(id: String!): ArtifactScanData + queryArtifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + aggregateArtifactScanData(filter: ArtifactScanDataFilter): ArtifactScanDataAggregateResult + getComponent(id: String!): Component + queryComponent(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + aggregateComponent(filter: ComponentFilter): ComponentAggregateResult + getVulnerability(id: String!): Vulnerability + queryVulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + aggregateVulnerability(filter: VulnerabilityFilter): VulnerabilityAggregateResult + getCWE(id: String!): CWE + queryCWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + aggregateCWE(filter: CWEFilter): CWEAggregateResult +} + +interface RBAC { + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + rolesAggregate(filter: RoleFilter): RoleAggregateResult +} + +type RBACAggregateResult { + count: Int +} + +input RBACFilter { + has: [RBACHasFilter] + and: [RBACFilter] + or: [RBACFilter] + not: RBACFilter +} + +enum RBACHasFilter { + roles +} + +input RBACPatch { + roles: [RoleRef!] +} + +""" +RiskStatus tells us what risk a current application instance or a deployment is at. 
+""" +enum RiskStatus { + lowrisk + mediumrisk + highrisk + apocalypserisk + scanning +} + +input RiskStatus_exact { + eq: RiskStatus + in: [RiskStatus] + le: RiskStatus + lt: RiskStatus + ge: RiskStatus + gt: RiskStatus + between: RiskStatus +} + +input RiskStatus_exact_StringRegExpFilter { + eq: RiskStatus + in: [RiskStatus] + le: RiskStatus + lt: RiskStatus + ge: RiskStatus + gt: RiskStatus + between: RiskStatus + regexp: String +} + +type Role { + """id is randomly assigned""" + id: String! + + """group should be a URI format that includes a scope or realm""" + group: String! + permission: RolePermission! +} + +type RoleAggregateResult { + count: Int + idMin: String + idMax: String + groupMin: String + groupMax: String +} + +input RoleFilter { + id: StringHashFilter + group: StringHashFilter + permission: RolePermission_hash + has: [RoleHasFilter] + and: [RoleFilter] + or: [RoleFilter] + not: RoleFilter +} + +enum RoleHasFilter { + id + group + permission +} + +input RoleOrder { + asc: RoleOrderable + desc: RoleOrderable + then: RoleOrder +} + +enum RoleOrderable { + id + group +} + +input RolePatch { + """group should be a URI format that includes a scope or realm""" + group: String + permission: RolePermission +} + +enum RolePermission { + admin + write + read +} + +input RolePermission_hash { + eq: RolePermission + in: [RolePermission] +} + +input RoleRef { + """id is randomly assigned""" + id: String + + """group should be a URI format that includes a scope or realm""" + group: String + permission: RolePermission +} + +type RunHistory { + id: ID! + policyId: String! + applicationDeployment(filter: ApplicationDeploymentFilter): ApplicationDeployment + artifactScan(filter: ArtifactScanDataFilter): ArtifactScanData + PolicyName: String! + Stage: String! + Artifact: String! + ArtifactTag: String! + ArtifactSha: String! + ArtifactNameTag: String! + DatasourceTool: String! + CreatedAt: DateTime! + UpdatedAt: DateTime! + DeployedAt: DateTime! 
+ Hash: String + Pass: Boolean! + MetaData: String + FileApi: String + scheduledPolicy: Boolean! + policyEnforcements(filter: PolicyEnforcementFilter): PolicyEnforcement! + securityIssue(filter: SecurityIssueFilter): SecurityIssue +} + +type RunHistoryAggregateResult { + count: Int + policyIdMin: String + policyIdMax: String + PolicyNameMin: String + PolicyNameMax: String + StageMin: String + StageMax: String + ArtifactMin: String + ArtifactMax: String + ArtifactTagMin: String + ArtifactTagMax: String + ArtifactShaMin: String + ArtifactShaMax: String + ArtifactNameTagMin: String + ArtifactNameTagMax: String + DatasourceToolMin: String + DatasourceToolMax: String + CreatedAtMin: DateTime + CreatedAtMax: DateTime + UpdatedAtMin: DateTime + UpdatedAtMax: DateTime + DeployedAtMin: DateTime + DeployedAtMax: DateTime + HashMin: String + HashMax: String + MetaDataMin: String + MetaDataMax: String + FileApiMin: String + FileApiMax: String +} + +input RunHistoryFilter { + id: [ID!] + policyId: StringExactFilter + PolicyName: StringExactFilter + Stage: StringExactFilter + Artifact: StringExactFilter + ArtifactTag: StringExactFilter + ArtifactSha: StringExactFilter + ArtifactNameTag: StringExactFilter_StringRegExpFilter + CreatedAt: DateTimeFilter + UpdatedAt: DateTimeFilter + DeployedAt: DateTimeFilter + Pass: Boolean + scheduledPolicy: Boolean + has: [RunHistoryHasFilter] + and: [RunHistoryFilter] + or: [RunHistoryFilter] + not: RunHistoryFilter +} + +enum RunHistoryHasFilter { + policyId + applicationDeployment + artifactScan + PolicyName + Stage + Artifact + ArtifactTag + ArtifactSha + ArtifactNameTag + DatasourceTool + CreatedAt + UpdatedAt + DeployedAt + Hash + Pass + MetaData + FileApi + scheduledPolicy + policyEnforcements + securityIssue +} + +input RunHistoryOrder { + asc: RunHistoryOrderable + desc: RunHistoryOrderable + then: RunHistoryOrder +} + +enum RunHistoryOrderable { + policyId + PolicyName + Stage + Artifact + ArtifactTag + ArtifactSha + ArtifactNameTag + 
DatasourceTool + CreatedAt + UpdatedAt + DeployedAt + Hash + MetaData + FileApi +} + +input RunHistoryPatch { + policyId: String + applicationDeployment: ApplicationDeploymentRef + artifactScan: ArtifactScanDataRef + PolicyName: String + Stage: String + Artifact: String + ArtifactTag: String + ArtifactSha: String + ArtifactNameTag: String + DatasourceTool: String + CreatedAt: DateTime + UpdatedAt: DateTime + DeployedAt: DateTime + Hash: String + Pass: Boolean + MetaData: String + FileApi: String + scheduledPolicy: Boolean + policyEnforcements: PolicyEnforcementRef + securityIssue: SecurityIssueRef +} + +input RunHistoryRef { + id: ID + policyId: String + applicationDeployment: ApplicationDeploymentRef + artifactScan: ArtifactScanDataRef + PolicyName: String + Stage: String + Artifact: String + ArtifactTag: String + ArtifactSha: String + ArtifactNameTag: String + DatasourceTool: String + CreatedAt: DateTime + UpdatedAt: DateTime + DeployedAt: DateTime + Hash: String + Pass: Boolean + MetaData: String + FileApi: String + scheduledPolicy: Boolean + policyEnforcements: PolicyEnforcementRef + securityIssue: SecurityIssueRef +} + +type SchemaVersion { + version: String! +} + +type SchemaVersionAggregateResult { + count: Int + versionMin: String + versionMax: String +} + +input SchemaVersionFilter { + has: [SchemaVersionHasFilter] + and: [SchemaVersionFilter] + or: [SchemaVersionFilter] + not: SchemaVersionFilter +} + +enum SchemaVersionHasFilter { + version +} + +input SchemaVersionOrder { + asc: SchemaVersionOrderable + desc: SchemaVersionOrderable + then: SchemaVersionOrder +} + +enum SchemaVersionOrderable { + version +} + +input SchemaVersionPatch { + version: String +} + +input SchemaVersionRef { + version: String +} + +type SecurityIssue { + id: ID! + AlertTitle: String + AlertMessage: String + Suggestions: String + Severity: Severity! + CreatedAt: DateTime! + UpdatedAt: DateTime! + Action: String! + JiraUrl: String + Status: String! 
+ Reason: String + Error: String + Affects(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory!] + AffectsAggregate(filter: RunHistoryFilter): RunHistoryAggregateResult +} + +type SecurityIssueAggregateResult { + count: Int + AlertTitleMin: String + AlertTitleMax: String + AlertMessageMin: String + AlertMessageMax: String + SuggestionsMin: String + SuggestionsMax: String + CreatedAtMin: DateTime + CreatedAtMax: DateTime + UpdatedAtMin: DateTime + UpdatedAtMax: DateTime + ActionMin: String + ActionMax: String + JiraUrlMin: String + JiraUrlMax: String + StatusMin: String + StatusMax: String + ReasonMin: String + ReasonMax: String + ErrorMin: String + ErrorMax: String +} + +input SecurityIssueFilter { + id: [ID!] + AlertTitle: StringExactFilter_StringRegExpFilter + AlertMessage: StringExactFilter + Suggestions: StringExactFilter + Severity: Severity_exact + CreatedAt: DateTimeFilter + UpdatedAt: DateTimeFilter + Action: StringExactFilter + Status: StringExactFilter + Reason: StringExactFilter + Error: StringExactFilter + has: [SecurityIssueHasFilter] + and: [SecurityIssueFilter] + or: [SecurityIssueFilter] + not: SecurityIssueFilter +} + +enum SecurityIssueHasFilter { + AlertTitle + AlertMessage + Suggestions + Severity + CreatedAt + UpdatedAt + Action + JiraUrl + Status + Reason + Error + Affects +} + +input SecurityIssueOrder { + asc: SecurityIssueOrderable + desc: SecurityIssueOrderable + then: SecurityIssueOrder +} + +enum SecurityIssueOrderable { + AlertTitle + AlertMessage + Suggestions + CreatedAt + UpdatedAt + Action + JiraUrl + Status + Reason + Error +} + +input SecurityIssuePatch { + AlertTitle: String + AlertMessage: String + Suggestions: String + Severity: Severity + CreatedAt: DateTime + UpdatedAt: DateTime + Action: String + JiraUrl: String + Status: String + Reason: String + Error: String + Affects: [RunHistoryRef!] 
+} + +input SecurityIssueRef { + id: ID + AlertTitle: String + AlertMessage: String + Suggestions: String + Severity: Severity + CreatedAt: DateTime + UpdatedAt: DateTime + Action: String + JiraUrl: String + Status: String + Reason: String + Error: String + Affects: [RunHistoryRef!] +} + +enum Severity { + critical + high + medium + low + info + none + unknown +} + +input Severity_exact { + eq: Severity + in: [Severity] + le: Severity + lt: Severity + ge: Severity + gt: Severity + between: Severity +} + +""" +SourceCodeTool contains the source details about the artifact that was built. +""" +type SourceCodeTool { + """id is randomly assigned""" + id: String! + createdAt: DateTime! + + """scm is the scm tool github/gitlab etc""" + scm: String! + + """repository is the git remote repository""" + repository: String! + + """branch is the git branch on which the artifact was built""" + branch: String! + + """headCommit is the checkout out head commit""" + headCommit: String + + """ + diffCommits is a comma separated string of the commits between the previous built artifact and the current + """ + diffCommits: String + licenseName: String + visibility: String + workflowName: String + + """parentRepo is populated in case the git repo is a fork""" + parentRepo: String + buildTool(filter: BuildToolFilter): BuildTool! 
+} + +type SourceCodeToolAggregateResult { + count: Int + idMin: String + idMax: String + createdAtMin: DateTime + createdAtMax: DateTime + scmMin: String + scmMax: String + repositoryMin: String + repositoryMax: String + branchMin: String + branchMax: String + headCommitMin: String + headCommitMax: String + diffCommitsMin: String + diffCommitsMax: String + licenseNameMin: String + licenseNameMax: String + visibilityMin: String + visibilityMax: String + workflowNameMin: String + workflowNameMax: String + parentRepoMin: String + parentRepoMax: String +} + +input SourceCodeToolFilter { + id: StringHashFilter + repository: StringExactFilter_StringRegExpFilter + has: [SourceCodeToolHasFilter] + and: [SourceCodeToolFilter] + or: [SourceCodeToolFilter] + not: SourceCodeToolFilter +} + +enum SourceCodeToolHasFilter { + id + createdAt + scm + repository + branch + headCommit + diffCommits + licenseName + visibility + workflowName + parentRepo + buildTool +} + +input SourceCodeToolOrder { + asc: SourceCodeToolOrderable + desc: SourceCodeToolOrderable + then: SourceCodeToolOrder +} + +enum SourceCodeToolOrderable { + id + createdAt + scm + repository + branch + headCommit + diffCommits + licenseName + visibility + workflowName + parentRepo +} + +input SourceCodeToolPatch { + createdAt: DateTime + + """scm is the scm tool github/gitlab etc""" + scm: String + + """repository is the git remote repository""" + repository: String + + """branch is the git branch on which the artifact was built""" + branch: String + + """headCommit is the checkout out head commit""" + headCommit: String + + """ + diffCommits is a comma separated string of the commits between the previous built artifact and the current + """ + diffCommits: String + licenseName: String + visibility: String + workflowName: String + + """parentRepo is populated in case the git repo is a fork""" + parentRepo: String + buildTool: BuildToolRef +} + +input SourceCodeToolRef { + """id is randomly assigned""" + id: String + 
createdAt: DateTime + + """scm is the scm tool github/gitlab etc""" + scm: String + + """repository is the git remote repository""" + repository: String + + """branch is the git branch on which the artifact was built""" + branch: String + + """headCommit is the checkout out head commit""" + headCommit: String + + """ + diffCommits is a comma separated string of the commits between the previous built artifact and the current + """ + diffCommits: String + licenseName: String + visibility: String + workflowName: String + + """parentRepo is populated in case the git repo is a fork""" + parentRepo: String + buildTool: BuildToolRef +} + +input StringExactFilter { + eq: String + in: [String] + le: String + lt: String + ge: String + gt: String + between: StringRange +} + +input StringExactFilter_StringRegExpFilter { + eq: String + in: [String] + le: String + lt: String + ge: String + gt: String + between: StringRange + regexp: String +} + +input StringFullTextFilter { + alloftext: String + anyoftext: String +} + +input StringHashFilter { + eq: String + in: [String] +} + +input StringRange { + min: String! + max: String! 
+} + +input StringRegExpFilter { + regexp: String +} + +input StringTermFilter { + allofterms: String + anyofterms: String +} + +type Subscription { + getOrganization(id: String!): Organization + queryOrganization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + aggregateOrganization(filter: OrganizationFilter): OrganizationAggregateResult + getTeam(id: String!): Team + queryTeam(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + aggregateTeam(filter: TeamFilter): TeamAggregateResult + getApplication(id: String!): Application + queryApplication(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + aggregateApplication(filter: ApplicationFilter): ApplicationAggregateResult + getApplicationEnvironment(id: String!): ApplicationEnvironment + queryApplicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + aggregateApplicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironmentAggregateResult +} + +""" +Tag tells us about the tags that are linked to policies and other components. +""" +type Tag { + id: String! + tagName: String! + tagValue: String! + tagDescription: String + createdBy: String + createdAt: DateTime! + updatedAt: DateTime! + policies(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] 
+ policiesAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult +} + +type TagAggregateResult { + count: Int + idMin: String + idMax: String + tagNameMin: String + tagNameMax: String + tagValueMin: String + tagValueMax: String + tagDescriptionMin: String + tagDescriptionMax: String + createdByMin: String + createdByMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input TagFilter { + id: StringExactFilter + tagName: StringExactFilter + tagValue: StringExactFilter + createdBy: StringExactFilter + has: [TagHasFilter] + and: [TagFilter] + or: [TagFilter] + not: TagFilter +} + +enum TagHasFilter { + id + tagName + tagValue + tagDescription + createdBy + createdAt + updatedAt + policies +} + +input TagOrder { + asc: TagOrderable + desc: TagOrderable + then: TagOrder +} + +enum TagOrderable { + id + tagName + tagValue + tagDescription + createdBy + createdAt + updatedAt +} + +input TagPatch { + tagName: String + tagValue: String + tagDescription: String + createdBy: String + createdAt: DateTime + updatedAt: DateTime + policies: [PolicyEnforcementRef!] +} + +input TagRef { + id: String + tagName: String + tagValue: String + tagDescription: String + createdBy: String + createdAt: DateTime + updatedAt: DateTime + policies: [PolicyEnforcementRef!] +} + +type Team implements RBAC { + """id is randomly assigned""" + id: String! + name: String! + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + organization(filter: OrganizationFilter): Organization! + applications(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application!] + labels(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue!] + policies(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition!] 
+ policyEnforcements(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] + rolesAggregate(filter: RoleFilter): RoleAggregateResult + applicationsAggregate(filter: ApplicationFilter): ApplicationAggregateResult + labelsAggregate(filter: KeyValueFilter): KeyValueAggregateResult + policiesAggregate(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + policyEnforcementsAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult +} + +type TeamAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String +} + +input TeamFilter { + id: StringHashFilter + name: StringExactFilter + has: [TeamHasFilter] + and: [TeamFilter] + or: [TeamFilter] + not: TeamFilter +} + +enum TeamHasFilter { + id + name + roles + organization + applications + labels + policies + policyEnforcements +} + +input TeamOrder { + asc: TeamOrderable + desc: TeamOrderable + then: TeamOrder +} + +enum TeamOrderable { + id + name +} + +input TeamPatch { + name: String + roles: [RoleRef!] + organization: OrganizationRef + applications: [ApplicationRef!] + labels: [KeyValueRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] +} + +input TeamRef { + """id is randomly assigned""" + id: String + name: String + roles: [RoleRef!] + organization: OrganizationRef + applications: [ApplicationRef!] + labels: [KeyValueRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] +} + +type ToolsUsed { + id: ID! + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +type ToolsUsedAggregateResult { + count: Int + sourceMin: String + sourceMax: String + buildMin: String + buildMax: String + artifactMin: String + artifactMax: String + deployMin: String + deployMax: String + sbomMin: String + sbomMax: String +} + +input ToolsUsedFilter { + id: [ID!] 
+ has: [ToolsUsedHasFilter] + and: [ToolsUsedFilter] + or: [ToolsUsedFilter] + not: ToolsUsedFilter +} + +enum ToolsUsedHasFilter { + source + build + artifact + deploy + sbom + misc +} + +input ToolsUsedOrder { + asc: ToolsUsedOrderable + desc: ToolsUsedOrderable + then: ToolsUsedOrder +} + +enum ToolsUsedOrderable { + source + build + artifact + deploy + sbom +} + +input ToolsUsedPatch { + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +input ToolsUsedRef { + id: ID + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +input UpdateApplicationDeploymentInput { + filter: ApplicationDeploymentFilter! + set: ApplicationDeploymentPatch + remove: ApplicationDeploymentPatch +} + +type UpdateApplicationDeploymentPayload { + applicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + numUids: Int +} + +input UpdateApplicationDeploymentRiskInput { + filter: ApplicationDeploymentRiskFilter! + set: ApplicationDeploymentRiskPatch + remove: ApplicationDeploymentRiskPatch +} + +type UpdateApplicationDeploymentRiskPayload { + applicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + numUids: Int +} + +input UpdateApplicationEnvironmentInput { + filter: ApplicationEnvironmentFilter! + set: ApplicationEnvironmentPatch + remove: ApplicationEnvironmentPatch +} + +type UpdateApplicationEnvironmentPayload { + applicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + numUids: Int +} + +input UpdateApplicationInput { + filter: ApplicationFilter! 
+ set: ApplicationPatch + remove: ApplicationPatch +} + +type UpdateApplicationPayload { + application(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + numUids: Int +} + +input UpdateApplicationRiskStatusInput { + filter: ApplicationRiskStatusFilter! + set: ApplicationRiskStatusPatch + remove: ApplicationRiskStatusPatch +} + +type UpdateApplicationRiskStatusPayload { + applicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + numUids: Int +} + +input UpdateArtifactInput { + filter: ArtifactFilter! + set: ArtifactPatch + remove: ArtifactPatch +} + +type UpdateArtifactPayload { + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + numUids: Int +} + +input UpdateArtifactScanDataInput { + filter: ArtifactScanDataFilter! + set: ArtifactScanDataPatch + remove: ArtifactScanDataPatch +} + +type UpdateArtifactScanDataPayload { + artifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + numUids: Int +} + +input UpdateBuildToolInput { + filter: BuildToolFilter! + set: BuildToolPatch + remove: BuildToolPatch +} + +type UpdateBuildToolPayload { + buildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + numUids: Int +} + +input UpdateCommitMetaDataInput { + filter: CommitMetaDataFilter! + set: CommitMetaDataPatch + remove: CommitMetaDataPatch +} + +type UpdateCommitMetaDataPayload { + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + numUids: Int +} + +input UpdateComponentInput { + filter: ComponentFilter! 
+ set: ComponentPatch + remove: ComponentPatch +} + +type UpdateComponentPayload { + component(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + numUids: Int +} + +input UpdateCredentialsInput { + filter: CredentialsFilter! + set: CredentialsPatch + remove: CredentialsPatch +} + +type UpdateCredentialsPayload { + credentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + numUids: Int +} + +input UpdateCWEInput { + filter: CWEFilter! + set: CWEPatch + remove: CWEPatch +} + +type UpdateCWEPayload { + cWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + numUids: Int +} + +input UpdateDeploymentTargetInput { + filter: DeploymentTargetFilter! + set: DeploymentTargetPatch + remove: DeploymentTargetPatch +} + +type UpdateDeploymentTargetPayload { + deploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + numUids: Int +} + +input UpdateEnvironmentInput { + filter: EnvironmentFilter! + set: EnvironmentPatch + remove: EnvironmentPatch +} + +type UpdateEnvironmentPayload { + environment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + numUids: Int +} + +input UpdateFeatureModeInput { + filter: FeatureModeFilter! + set: FeatureModePatch + remove: FeatureModePatch +} + +type UpdateFeatureModePayload { + featureMode(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode] + numUids: Int +} + +input UpdateIntegratorInput { + filter: IntegratorFilter! + set: IntegratorPatch + remove: IntegratorPatch +} + +type UpdateIntegratorPayload { + integrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + numUids: Int +} + +input UpdateKeyValueInput { + filter: KeyValueFilter! 
+ set: KeyValuePatch + remove: KeyValuePatch +} + +type UpdateKeyValuePayload { + keyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + numUids: Int +} + +input UpdateOrganizationInput { + filter: OrganizationFilter! + set: OrganizationPatch + remove: OrganizationPatch +} + +type UpdateOrganizationPayload { + organization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + numUids: Int +} + +input UpdatePolicyDefinitionInput { + filter: PolicyDefinitionFilter! + set: PolicyDefinitionPatch + remove: PolicyDefinitionPatch +} + +type UpdatePolicyDefinitionPayload { + policyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + numUids: Int +} + +input UpdatePolicyEnforcementInput { + filter: PolicyEnforcementFilter! + set: PolicyEnforcementPatch + remove: PolicyEnforcementPatch +} + +type UpdatePolicyEnforcementPayload { + policyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + numUids: Int +} + +input UpdateRBACInput { + filter: RBACFilter! + set: RBACPatch + remove: RBACPatch +} + +type UpdateRBACPayload { + rBAC(filter: RBACFilter, first: Int, offset: Int): [RBAC] + numUids: Int +} + +input UpdateRoleInput { + filter: RoleFilter! + set: RolePatch + remove: RolePatch +} + +type UpdateRolePayload { + role(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + numUids: Int +} + +input UpdateRunHistoryInput { + filter: RunHistoryFilter! + set: RunHistoryPatch + remove: RunHistoryPatch +} + +type UpdateRunHistoryPayload { + runHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + numUids: Int +} + +input UpdateSchemaVersionInput { + filter: SchemaVersionFilter! 
+ set: SchemaVersionPatch + remove: SchemaVersionPatch +} + +type UpdateSchemaVersionPayload { + schemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + numUids: Int +} + +input UpdateSecurityIssueInput { + filter: SecurityIssueFilter! + set: SecurityIssuePatch + remove: SecurityIssuePatch +} + +type UpdateSecurityIssuePayload { + securityIssue(filter: SecurityIssueFilter, order: SecurityIssueOrder, first: Int, offset: Int): [SecurityIssue] + numUids: Int +} + +input UpdateSourceCodeToolInput { + filter: SourceCodeToolFilter! + set: SourceCodeToolPatch + remove: SourceCodeToolPatch +} + +type UpdateSourceCodeToolPayload { + sourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + numUids: Int +} + +input UpdateTagInput { + filter: TagFilter! + set: TagPatch + remove: TagPatch +} + +type UpdateTagPayload { + tag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + numUids: Int +} + +input UpdateTeamInput { + filter: TeamFilter! + set: TeamPatch + remove: TeamPatch +} + +type UpdateTeamPayload { + team(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + numUids: Int +} + +input UpdateToolsUsedInput { + filter: ToolsUsedFilter! + set: ToolsUsedPatch + remove: ToolsUsedPatch +} + +type UpdateToolsUsedPayload { + toolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + numUids: Int +} + +input UpdateVulnerabilityInput { + filter: VulnerabilityFilter! + set: VulnerabilityPatch + remove: VulnerabilityPatch +} + +type UpdateVulnerabilityPayload { + vulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + numUids: Int +} + +type Vulnerability { + id: String! + parent: String! + ratings: Severity + cwes(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE!] 
+ summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: Float + priority: String + epss: Float + cisa_kev: String + affects(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component!] + cwesAggregate(filter: CWEFilter): CWEAggregateResult + affectsAggregate(filter: ComponentFilter): ComponentAggregateResult +} + +type VulnerabilityAggregateResult { + count: Int + idMin: String + idMax: String + parentMin: String + parentMax: String + summaryMin: String + summaryMax: String + detailMin: String + detailMax: String + recommendationMin: String + recommendationMax: String + publishedMin: DateTime + publishedMax: DateTime + modifiedMin: DateTime + modifiedMax: DateTime + createdAtMin: DateTime + createdAtMax: DateTime + cvssMin: Float + cvssMax: Float + cvssSum: Float + cvssAvg: Float + priorityMin: String + priorityMax: String + epssMin: Float + epssMax: Float + epssSum: Float + epssAvg: Float + cisa_kevMin: String + cisa_kevMax: String +} + +input VulnerabilityFilter { + id: StringHashFilter + parent: StringExactFilter_StringRegExpFilter + ratings: Severity_exact + createdAt: DateTimeFilter + cvss: FloatFilter + priority: StringExactFilter_StringRegExpFilter + epss: FloatFilter + cisa_kev: StringExactFilter_StringRegExpFilter + has: [VulnerabilityHasFilter] + and: [VulnerabilityFilter] + or: [VulnerabilityFilter] + not: VulnerabilityFilter +} + +enum VulnerabilityHasFilter { + id + parent + ratings + cwes + summary + detail + recommendation + published + modified + createdAt + cvss + priority + epss + cisa_kev + affects +} + +input VulnerabilityOrder { + asc: VulnerabilityOrderable + desc: VulnerabilityOrderable + then: VulnerabilityOrder +} + +enum VulnerabilityOrderable { + id + parent + summary + detail + recommendation + published + modified + createdAt + cvss + priority + epss + cisa_kev +} + +input VulnerabilityPatch { + parent: String + ratings: Severity + 
cwes: [CWERef!] + summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: Float + priority: String + epss: Float + cisa_kev: String + affects: [ComponentRef!] +} + +input VulnerabilityRef { + id: String + parent: String + ratings: Severity + cwes: [CWERef!] + summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: Float + priority: String + epss: Float + cisa_kev: String + affects: [ComponentRef!] +} + +input WithinFilter { + polygon: PolygonRef! +} + diff --git a/april2024june2024/upgradeSteps.go b/april2024june2024/upgradeSteps.go new file mode 100644 index 0000000..f86ebbe --- /dev/null +++ b/april2024june2024/upgradeSteps.go @@ -0,0 +1,36 @@ +package april2024june2024 + +import ( + "fmt" + + graphqlfunc "upgradationScript/graphqlFunc" + "upgradationScript/logger" + "upgradationScript/schemas" + + "github.com/Khan/genqlient/graphql" +) + +func UpgradeToJune2024(prodGraphUrl, prodToken, expDgraphUrl, restoreServiceUrl string, prodDgraphClient, expDgraphClient graphql.Client) error { + + logger.Logger.Info("--------------Starting UpgradeToJune2024------------------") + + if err := performDeDeplicationTransition(prodDgraphClient, expDgraphClient); err != nil { + return fmt.Errorf("UpgradeToJune2024: %s", err.Error()) + } + + if err := graphqlfunc.BackupAndRestoreDgraph(expDgraphUrl, restoreServiceUrl); err != nil { + return fmt.Errorf("UpgradeToJune2024: BackupAndRestoreDgraph: %s", err.Error()) + } + + if err := graphqlfunc.UpdateSchema(prodGraphUrl, prodToken, []byte(schemas.June2024Schema)); err != nil { + return fmt.Errorf("UpgradeToJune2024: UpdateSchema: %s", err.Error()) + } + + if err := populateAppLevelTools(prodDgraphClient); err != nil { + return fmt.Errorf("UpgradeToJune2024: %s", err.Error()) + } + + logger.Logger.Info("--------------Completed UpgradeToJune2024------------------") + + return nil +} diff 
--git a/april2024june2024/utils.go b/april2024june2024/utils.go new file mode 100644 index 0000000..6392756 --- /dev/null +++ b/april2024june2024/utils.go @@ -0,0 +1,15 @@ +package april2024june2024 + +func AppendIfNotPresent(slice []string, str string) []string { + + if str == "" || str == "[]" { + return slice + } + + for _, s := range slice { + if s == str { + return slice // String already present, return original slice + } + } + return append(slice, str) // String not present, append it to the slice +} diff --git a/common/conf.go b/common/conf.go new file mode 100644 index 0000000..45f3588 --- /dev/null +++ b/common/conf.go @@ -0,0 +1,61 @@ +package common + +import ( + "os" + "strings" + "upgradationScript/logger" + + "github.com/OpsMx/ssd-jwt-auth/ssdjwtauth" + "gopkg.in/yaml.v2" +) + +type Configuration struct { + ProdGraphQLAddr string `json:"prodGraphQLAddr,omitempty" yaml:"prodGraphQLAddr,omitempty"` + ExpGraphQLAddr string `json:"expGraphQLAddr,omitempty" yaml:"expGraphQLAddr,omitempty"` + ProdDgraphToken string `json:"prodDgraphToken,omitempty" yaml:"prodDgraphToken,omitempty"` + ExpDgraphToken string `json:"expDgraphToken,omitempty" yaml:"expDgraphToken,omitempty"` + UpgradeToVersion string `json:"upgradeToVersion,omitempty" yaml:"upgradeToVersion,omitempty"` + RemoteDgraphRestoreUrl string `json:"remoteDgraphRestoreUrl,omitempty" yaml:"remoteDgraphRestoreUrl,omitempty"` +} + +var ( + Conf *Configuration + TokenVerifier *ssdjwtauth.Verifier + UpgradeToVersion SchemaOrder +) + +func LoadConfigurationFile(confPath string) { + + buf, err := os.ReadFile(confPath) + if err != nil { + logger.Logger.Sugar().Fatalw("os.ReadFile", "error", err.Error()) + } + + if err := yaml.Unmarshal(buf, &Conf); err != nil { + logger.Logger.Sugar().Fatalw("yaml.Unmarshal", "error", err.Error()) + } + + if strings.TrimSpace(Conf.ProdGraphQLAddr) == "" { + logger.Logger.Sugar().Fatalw("prodGraphQLAddr is empty Please provide") + } + + if strings.TrimSpace(Conf.ProdDgraphToken) 
== "" { + logger.Logger.Sugar().Fatalw("prodDgraphToken is empty Please provide") + } + + TokenVerifier, err = ssdjwtauth.NewVerifier(map[string][]byte{}, nil) + if err != nil { + logger.Logger.Sugar().Fatalf("ssdjwtauth.NewVerifier: err : %s", err.Error()) + } + + if strings.TrimSpace(Conf.UpgradeToVersion) == "" { + logger.Logger.Sugar().Fatalw("upgradeToVersion is empty Please provide") + } + + var ok bool + UpgradeToVersion, ok = schemaOrderMap[Conf.UpgradeToVersion] + if !ok { + logger.Logger.Sugar().Fatalw("unrecognized schema version provided. Please provide in format MonthYYYY eg November2024") + } + +} diff --git a/common/expDgraphChecks.go b/common/expDgraphChecks.go new file mode 100644 index 0000000..ebe5899 --- /dev/null +++ b/common/expDgraphChecks.go @@ -0,0 +1,53 @@ +package common + +import ( + "fmt" + "os" + graphqlfunc "upgradationScript/graphqlFunc" + "upgradationScript/logger" +) + +func allChecksForExpDgraph(schema SchemaOrder) error { + if Conf.ExpGraphQLAddr == "" { + return fmt.Errorf("expGraphQLAddr is required") + } + + if Conf.ExpDgraphToken == "" { + return fmt.Errorf("expDgraphToken is required") + } + + if Conf.RemoteDgraphRestoreUrl == "" { + return fmt.Errorf("remoteDgraphRestoreUrl is required") + } + + if _, found := os.LookupEnv("S3_ENDPOINT_URL"); !found { + return fmt.Errorf("envar S3_ENDPOINT_URL is not set") + } + + if _, found := os.LookupEnv("AWS_ACCESS_KEY_ID"); !found { + return fmt.Errorf("envar AWS_ACCESS_KEY_ID is not set") + } + + if _, found := os.LookupEnv("AWS_SECRET_ACCESS_KEY"); !found { + return fmt.Errorf("envar AWS_SECRET_ACCESS_KEY is not set") + } + + schemaPresent, err := graphqlfunc.RetrieveSchema(Conf.ExpGraphQLAddr) + if err != nil { + return fmt.Errorf("allChecksForExpDgraph: RetrieveSchema: %s", err.Error()) + } + + if getTheSchemaVersion(schemaPresent) == schema { + return nil + } + + logger.Logger.Info("-------Updating schema of exp dgraph--------------") + + if err := 
graphqlfunc.UpdateSchema(Conf.ExpGraphQLAddr, Conf.ExpDgraphToken, []byte(schema.String())); err != nil { + return fmt.Errorf("allChecksForExpDgraph: UpdateSchema: %s", err.Error()) + } + + logger.Logger.Info("-------All checks passed of exp dgraph--------------") + + return nil +} diff --git a/common/schemaHelpers.go b/common/schemaHelpers.go new file mode 100644 index 0000000..77fca8e --- /dev/null +++ b/common/schemaHelpers.go @@ -0,0 +1,70 @@ +package common + +import ( + "upgradationScript/schemas" +) + +type SchemaOrder int + +const ( + UnIdentifiedVersion SchemaOrder = iota + April2024Version + June2024Version +) + +var SchemasString = map[SchemaOrder]string{ + April2024Version: schemas.April2024Schema, + June2024Version: schemas.June2024Schema, +} + +var schemaOrderMap = map[string]SchemaOrder{ + "April2024": April2024Version, + "June2024": June2024Version, +} + +func (e SchemaOrder) NameOfSchema() string { + for name, schemaOrder := range schemaOrderMap { + if e == schemaOrder { + return name + } + } + + return "UnidentifiedSchema" +} + +func (e SchemaOrder) String() string { + return SchemasString[e] +} + +func (e SchemaOrder) Int() int { + return int(e) +} + +func getTheSchemaVersion(checkSchema string) SchemaOrder { + + for schemaEnum, schema := range SchemasString { + + if schema == checkSchema { + return schemaEnum + } + } + + return UnIdentifiedVersion +} + +func checkIfSchemaAtUpgradedVersion(schemaOrder SchemaOrder) bool { + return schemaOrder.Int() == UpgradeToVersion.Int() +} + +func checkIfSchemaUpgradeNotPossible(schemaOrder SchemaOrder) bool { + return schemaOrder.Int() > UpgradeToVersion.Int() +} + +func totalUpgradeSteps(schemaVersion SchemaOrder) int { + return UpgradeToVersion.Int() - schemaVersion.Int() +} + +func upgradeSchemaBasedOnStep(schemaVersion SchemaOrder, step int) SchemaOrder { + step += 1 + return SchemaOrder(schemaVersion.Int() + step) +} diff --git a/common/tokenCheck.go b/common/tokenCheck.go new file mode 100644 index 
0000000..9151375 --- /dev/null +++ b/common/tokenCheck.go @@ -0,0 +1,26 @@ +package common + +import ( + "fmt" + "time" + + "github.com/OpsMx/ssd-jwt-auth/ssdjwtauth" +) + +func CheckToken(dgraphToken string) error { + claims, err := TokenVerifier.VerifyToken(dgraphToken) + if err != nil { + return fmt.Errorf("unauthorized token") + } + + if claims.SSDCLaims.Type != ssdjwtauth.SSDTokenTypeInternal { + return fmt.Errorf("token is not of type internal") + } + + if claims.ExpiresAt.Time.Before(time.Now().Add(1 * time.Hour)) { + return fmt.Errorf("token will expire within an hour. Please ensure token has validity of at least an hour before we begin upgradation process") + } + + return nil + +} diff --git a/common/upgradeSteps.go b/common/upgradeSteps.go new file mode 100644 index 0000000..3d90c2b --- /dev/null +++ b/common/upgradeSteps.go @@ -0,0 +1,100 @@ +package common + +import ( + "context" + "fmt" + + "upgradationScript/april2024june2024" + featuretable "upgradationScript/featureTable" + graphqlfunc "upgradationScript/graphqlFunc" + + "upgradationScript/logger" + policyingenstionscript "upgradationScript/policies" +) + +func StartUpgrade() error { + + logger.Logger.Info("------------Starting Upgrade--------------------") + + logger.Logger.Info("------------Retrieve Schema from Prod Dgraph--------------------") + + schema, err := graphqlfunc.RetrieveSchema(Conf.ProdGraphQLAddr) + if err != nil { + return fmt.Errorf("StartUpgrade: %s", err.Error()) + } + + logger.Logger.Info("------------Retrieved Schema from Prod Dgraph--------------------") + + schemaVersion := getTheSchemaVersion(schema) + + logger.Sl.Infof("Current Schema: %s", schemaVersion.NameOfSchema()) + + if checkIfSchemaUpgradeNotPossible(schemaVersion) { + return fmt.Errorf("cannot downgrade schema version. 
The current schema is at a higher version than asked for") + } + + if checkIfSchemaAtUpgradedVersion(schemaVersion) { + logger.Logger.Info("---------------Schema already at upgraded version------------------------") + return upgradePoliciesAndFeat() + } + + logger.Logger.Info("------------All pre checks of schema passed starting with upgrading process--------------------") + + for i := range totalUpgradeSteps(schemaVersion) { + + logger.Sl.Infof("STEP %d of upgrading schema", i) + + if err := beginProcessOfUpgrade(upgradeSchemaBasedOnStep(schemaVersion, i)); err != nil { + return fmt.Errorf("StartUpgrade: beginProcessOfUpgrade: %s", err.Error()) + } + + } + + return upgradePoliciesAndFeat() + +} + +func beginProcessOfUpgrade(upgradeTo SchemaOrder) error { + + prodGraphqlClient := graphqlfunc.NewClient(Conf.ProdGraphQLAddr, Conf.ProdDgraphToken) + expGraphqlClient := graphqlfunc.NewClient(Conf.ExpGraphQLAddr, Conf.ExpDgraphToken) + + switch upgradeTo { + case June2024Version: + + if err := allChecksForExpDgraph(June2024Version); err != nil { + return err + } + + return april2024june2024.UpgradeToJune2024(Conf.ProdGraphQLAddr, Conf.ProdDgraphToken, Conf.ExpGraphQLAddr, Conf.RemoteDgraphRestoreUrl, prodGraphqlClient, expGraphqlClient) + } + + logger.Sl.Debugf("no upgrade steps for %s", upgradeTo.NameOfSchema()) + return nil +} + +func upgradePoliciesAndFeat() error { + + logger.Logger.Info("-----------Starting Upgrade of Policies & feat-----------------") + + graphqlClient := graphqlfunc.NewClient(Conf.ProdGraphQLAddr, Conf.ProdDgraphToken) + getOrgId, err := graphqlfunc.GetOrgId(context.Background(), graphqlClient) + if err != nil { + return fmt.Errorf("upgradePoliciesAndFeat: getOrgId: error: %s", err.Error()) + } + + orgId := getOrgId.QueryOrganization[0].Id + + if err := policyingenstionscript.UpgradePolicyAndTagData(graphqlClient, orgId); err != nil { + return fmt.Errorf("upgradePoliciesAndFeat: %s", err.Error()) + } + + if err := 
featuretable.FeatTableUpgradeSteps(graphqlClient, orgId); err != nil { + return fmt.Errorf("upgradePoliciesAndFeat: FeatTableUpgradeSteps: error: %s", err.Error()) + } + + logger.Logger.Info("------------Completed Upgrade of Policies & feat--------------------") + logger.Logger.Info("------------Completed Upgrade--------------------") + + return nil +} diff --git a/configs/sample-local-config.yaml b/configs/sample-local-config.yaml new file mode 100644 index 0000000..1a9d766 --- /dev/null +++ b/configs/sample-local-config.yaml @@ -0,0 +1,4 @@ +prodGraphQLAddr: "http://localhost:8080" +prodDgraphToken: "" +upgradeToVersion: "June2024" +remoteDgraphRestoreUrl: "" \ No newline at end of file diff --git a/configs/upgrade-job-manifest.yaml b/configs/upgrade-job-manifest.yaml new file mode 100644 index 0000000..bd2edff --- /dev/null +++ b/configs/upgrade-job-manifest.yaml @@ -0,0 +1,47 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: upgrade-job +spec: + selector: + template: + metadata: + labels: + batch.kubernetes.io/job-name: upgrade-job + job-name: upgrade-job + spec: + containers: + - image: genos1998/upgrade-script:v0.0.15 + imagePullPolicy: IfNotPresent + name: upgrade-job + env: + - name: S3_ENDPOINT_URL + value: https://ssd-jul10-minio.aoa.oes.opsmx.org/ + - name: AWS_ACCESS_KEY_ID + value: spinnakeradmin + - name: AWS_SECRET_ACCESS_KEY + value: spinnakeradmin + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - name: datadir + mountPath: /app/dgraph + - name: upgrade-job-volume + mountPath: /app/config/upgrade-job.yaml + subPath: upgrade-job.yaml + - name: scanresult + mountPath: /app/scanResult + restartPolicy: Never + volumes: + - name: datadir + persistentVolumeClaim: + claimName: datadir-dgraph-0 + - name: upgrade-job-volume + configMap: + name: upgrade-job + items: + - key: upgrade-job.yaml + path: upgrade-job.yaml + defaultMode: 420 + - name: scanresult + diff --git a/configs/upgrade-job.yaml 
b/configs/upgrade-job.yaml new file mode 100644 index 0000000..d789e49 --- /dev/null +++ b/configs/upgrade-job.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: upgrade-job +data: + upgrade-job.yaml: | + prodGraphQLAddr: https://ssd-jul10-graphql.aoa.oes.opsmx.org + prodDgraphToken: eyJhbGciOiJSUzI1NiIsImtpZCI6IjZiNjRiNTFmLWMyNGItNDRlZS04YTM3LTEzNTkyYWM4MGI3OCIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJPcHNNeCIsImF1ZCI6WyJzc2Qub3BzbXguaW8iXSwiZXhwIjoxNzIxMDUwNDgyLCJuYmYiOjE3MjEwMjg4ODIsImp0aSI6ImIzNjQ3YTY3LTQyN2MtMTFlZi04YWQxLWZlMTU2YmUxNmU4ZSIsInNzZC5vcHNteC5pbyI6eyJ0eXBlIjoiaW50ZXJuYWwtYWNjb3VudC92MSIsImF1dGhvcml6YXRpb25zIjpbImFjY2Vzcy1kZ3JhcGgiXSwic2VydmljZSI6InNzZC1vcGEifX0.AeZdwK8jagDwSyo-H51-zEsPZMvRXS-zyg6T1Ue5MfHI6464po17l4gmz3AwY76rkYHzKH5hXWXbcunhViy6cqf_AmiV4lvNNJmFLfxfSeP7Y0Aa2Qa_6j5NzkhnPVmAsGrek5jhr7TRE3jlh5BO8uC-HJORTwQiCrlycTWTprKBicSp00--QaX3y-xyxJmTC0JTbpW4fDUdT9QgK8w4al0DXEXslnEliBOz5qW1Xmm2WjNgEuwdjQRLctt6cr7tgkdilYqgN0r9QqIYAkn_Y-JtNlqyTYNEFWA_IUXc5IJrVwgPY6lU5eRmZb5jtTeOpjtftqYIB_JULgp3q33Otw + upgradeToVersion: June2024 + remoteDgraphRestoreUrl: https://ssd-jul10-dbkp.aoa.oes.opsmx.org + expGraphQLAddr: http://dgraph-public:8080 + expDgraphToken: eyJhbGciOiJSUzI1NiIsImtpZCI6ImNjOTFlYTBkLWJlYmQtNDM2NC04ODhjLThjZTM0ZWUxYTRmNCIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJPcHNNeCIsImF1ZCI6WyJzc2Qub3BzbXguaW8iXSwiZXhwIjoxNzIxMDUwNDU4LCJuYmYiOjE3MjEwMjg4NTgsImp0aSI6ImE0ZWRiOWJmLTQyN2MtMTFlZi04ZTA4LWYyNjg1MDE0NDBjZiIsInNzZC5vcHNteC5pbyI6eyJ0eXBlIjoiaW50ZXJuYWwtYWNjb3VudC92MSIsImF1dGhvcml6YXRpb25zIjpbImFjY2Vzcy1kZ3JhcGgiXSwic2VydmljZSI6InNzZC1vcGEifX0.eCRWHTwhC961El_1EBZ4lGVGbodLiU9vC-jOz9bpUAMvxrbvTye9nEoSYDZCxwixD_FKbvjfQyE_61xL9o_8FOAT5ywHx3n_NV6THGifiokl8XYp6m3fz--ShwknKi_OpmeJS0rmbpWsq7jhqNTRMJ87WKTnwhCGN10M8DmBoNeaih6W-N9Y98IQPKqPpDxQIAWz2jilnLs3JFVjZ9Z4abC3Dc8RsZaUX7rU2ljTtDyiYvZq81fVez2m0uEo9mfDYX1WRZHcjA6w5k4F62Qq00pCmz9nRTINKwkjJWNrlp1rOqKGIznRw6bFJGN99i8u9B1qT9pCS0HguR1DwABHHg + + diff --git a/docker/run.sh b/docker/run.sh new file mode 100644 
index 0000000..473c09d --- /dev/null +++ b/docker/run.sh @@ -0,0 +1,16 @@ +#!/bin/sh +# Copyright 2022 OpsMx, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +exec $* diff --git a/featureTable/genqlient.yaml b/featureTable/genqlient.yaml new file mode 100644 index 0000000..ec18cc8 --- /dev/null +++ b/featureTable/genqlient.yaml @@ -0,0 +1,17 @@ +schema: schema.graphql +operations: +- queries.graphql +generated: schema-generated.go +package: featuretable +use_struct_references: true +bindings: + Boolean: + type: "*bool" + DateTime: + type: "*time.Time" + Int64: + type: int64 + Int: + type: "*int" + ID: + type: "*string" diff --git a/featureTable/queries.graphql b/featureTable/queries.graphql new file mode 100644 index 0000000..baef360 --- /dev/null +++ b/featureTable/queries.graphql @@ -0,0 +1,30 @@ +query checkIfFeatureRecordExists($featType: String!, $scan: String!) { + queryFeatureMode(filter: { type: { eq: $featType }, scan: { eq: $scan } }) { + id + } +} + +mutation addNewRecordFeatureTable( + $id: String! + $orgId: String! + $scan: String! + $featType: String! + $category: String! + $enabled: Boolean! + $ts: DateTime! 
+) { + addFeatureMode( + input: { + id: $id + organization: { id: $orgId } + scan: $scan + type: $featType + enabled: $enabled + category: $category + createdAt: $ts + updatedAt: $ts + } + ) { + numUids + } +} diff --git a/featureTable/schema-generated.go b/featureTable/schema-generated.go new file mode 100644 index 0000000..75776b9 --- /dev/null +++ b/featureTable/schema-generated.go @@ -0,0 +1,176 @@ +// Code generated by github.com/Khan/genqlient, DO NOT EDIT. + +package featuretable + +import ( + "context" + "time" + + "github.com/Khan/genqlient/graphql" +) + +// __addNewRecordFeatureTableInput is used internally by genqlient +type __addNewRecordFeatureTableInput struct { + Id string `json:"id"` + OrgId string `json:"orgId"` + Scan string `json:"scan"` + FeatType string `json:"featType"` + Category string `json:"category"` + Enabled *bool `json:"enabled"` + Ts *time.Time `json:"ts"` +} + +// GetId returns __addNewRecordFeatureTableInput.Id, and is useful for accessing the field via an interface. +func (v *__addNewRecordFeatureTableInput) GetId() string { return v.Id } + +// GetOrgId returns __addNewRecordFeatureTableInput.OrgId, and is useful for accessing the field via an interface. +func (v *__addNewRecordFeatureTableInput) GetOrgId() string { return v.OrgId } + +// GetScan returns __addNewRecordFeatureTableInput.Scan, and is useful for accessing the field via an interface. +func (v *__addNewRecordFeatureTableInput) GetScan() string { return v.Scan } + +// GetFeatType returns __addNewRecordFeatureTableInput.FeatType, and is useful for accessing the field via an interface. +func (v *__addNewRecordFeatureTableInput) GetFeatType() string { return v.FeatType } + +// GetCategory returns __addNewRecordFeatureTableInput.Category, and is useful for accessing the field via an interface. 
+func (v *__addNewRecordFeatureTableInput) GetCategory() string { return v.Category } + +// GetEnabled returns __addNewRecordFeatureTableInput.Enabled, and is useful for accessing the field via an interface. +func (v *__addNewRecordFeatureTableInput) GetEnabled() *bool { return v.Enabled } + +// GetTs returns __addNewRecordFeatureTableInput.Ts, and is useful for accessing the field via an interface. +func (v *__addNewRecordFeatureTableInput) GetTs() *time.Time { return v.Ts } + +// __checkIfFeatureRecordExistsInput is used internally by genqlient +type __checkIfFeatureRecordExistsInput struct { + FeatType string `json:"featType"` + Scan string `json:"scan"` +} + +// GetFeatType returns __checkIfFeatureRecordExistsInput.FeatType, and is useful for accessing the field via an interface. +func (v *__checkIfFeatureRecordExistsInput) GetFeatType() string { return v.FeatType } + +// GetScan returns __checkIfFeatureRecordExistsInput.Scan, and is useful for accessing the field via an interface. +func (v *__checkIfFeatureRecordExistsInput) GetScan() string { return v.Scan } + +// addNewRecordFeatureTableAddFeatureModeAddFeatureModePayload includes the requested fields of the GraphQL type AddFeatureModePayload. +type addNewRecordFeatureTableAddFeatureModeAddFeatureModePayload struct { + NumUids *int `json:"numUids"` +} + +// GetNumUids returns addNewRecordFeatureTableAddFeatureModeAddFeatureModePayload.NumUids, and is useful for accessing the field via an interface. +func (v *addNewRecordFeatureTableAddFeatureModeAddFeatureModePayload) GetNumUids() *int { + return v.NumUids +} + +// addNewRecordFeatureTableResponse is returned by addNewRecordFeatureTable on success. +type addNewRecordFeatureTableResponse struct { + AddFeatureMode *addNewRecordFeatureTableAddFeatureModeAddFeatureModePayload `json:"addFeatureMode"` +} + +// GetAddFeatureMode returns addNewRecordFeatureTableResponse.AddFeatureMode, and is useful for accessing the field via an interface. 
+func (v *addNewRecordFeatureTableResponse) GetAddFeatureMode() *addNewRecordFeatureTableAddFeatureModeAddFeatureModePayload { + return v.AddFeatureMode +} + +// checkIfFeatureRecordExistsQueryFeatureMode includes the requested fields of the GraphQL type FeatureMode. +type checkIfFeatureRecordExistsQueryFeatureMode struct { + Id string `json:"id"` +} + +// GetId returns checkIfFeatureRecordExistsQueryFeatureMode.Id, and is useful for accessing the field via an interface. +func (v *checkIfFeatureRecordExistsQueryFeatureMode) GetId() string { return v.Id } + +// checkIfFeatureRecordExistsResponse is returned by checkIfFeatureRecordExists on success. +type checkIfFeatureRecordExistsResponse struct { + QueryFeatureMode []*checkIfFeatureRecordExistsQueryFeatureMode `json:"queryFeatureMode"` +} + +// GetQueryFeatureMode returns checkIfFeatureRecordExistsResponse.QueryFeatureMode, and is useful for accessing the field via an interface. +func (v *checkIfFeatureRecordExistsResponse) GetQueryFeatureMode() []*checkIfFeatureRecordExistsQueryFeatureMode { + return v.QueryFeatureMode +} + +// The query or mutation executed by addNewRecordFeatureTable. +const addNewRecordFeatureTable_Operation = ` +mutation addNewRecordFeatureTable ($id: String!, $orgId: String!, $scan: String!, $featType: String!, $category: String!, $enabled: Boolean!, $ts: DateTime!) 
{ + addFeatureMode(input: {id:$id,organization:{id:$orgId},scan:$scan,type:$featType,enabled:$enabled,category:$category,createdAt:$ts,updatedAt:$ts}) { + numUids + } +} +` + +func addNewRecordFeatureTable( + ctx_ context.Context, + client_ graphql.Client, + id string, + orgId string, + scan string, + featType string, + category string, + enabled *bool, + ts *time.Time, +) (*addNewRecordFeatureTableResponse, error) { + req_ := &graphql.Request{ + OpName: "addNewRecordFeatureTable", + Query: addNewRecordFeatureTable_Operation, + Variables: &__addNewRecordFeatureTableInput{ + Id: id, + OrgId: orgId, + Scan: scan, + FeatType: featType, + Category: category, + Enabled: enabled, + Ts: ts, + }, + } + var err_ error + + var data_ addNewRecordFeatureTableResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by checkIfFeatureRecordExists. +const checkIfFeatureRecordExists_Operation = ` +query checkIfFeatureRecordExists ($featType: String!, $scan: String!) 
{ + queryFeatureMode(filter: {type:{eq:$featType},scan:{eq:$scan}}) { + id + } +} +` + +func checkIfFeatureRecordExists( + ctx_ context.Context, + client_ graphql.Client, + featType string, + scan string, +) (*checkIfFeatureRecordExistsResponse, error) { + req_ := &graphql.Request{ + OpName: "checkIfFeatureRecordExists", + Query: checkIfFeatureRecordExists_Operation, + Variables: &__checkIfFeatureRecordExistsInput{ + FeatType: featType, + Scan: scan, + }, + } + var err_ error + + var data_ checkIfFeatureRecordExistsResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} diff --git a/featureTable/schema.graphql b/featureTable/schema.graphql new file mode 100644 index 0000000..183bbbd --- /dev/null +++ b/featureTable/schema.graphql @@ -0,0 +1,4664 @@ +directive @secret(field: String!, pred: String) on OBJECT | INTERFACE + +directive @custom(http: CustomHTTP, dql: String) on FIELD_DEFINITION + +directive @lambdaOnMutate(add: Boolean, update: Boolean, delete: Boolean) on OBJECT | INTERFACE + +directive @hasInverse(field: String!) on FIELD_DEFINITION + +directive @search(by: [DgraphIndex!]) on FIELD_DEFINITION + +directive @dgraph(type: String, pred: String) on OBJECT | INTERFACE | FIELD_DEFINITION + +directive @auth(password: AuthRule, query: AuthRule, add: AuthRule, update: AuthRule, delete: AuthRule) on OBJECT | INTERFACE + +directive @remoteResponse(name: String) on FIELD_DEFINITION + +directive @cacheControl(maxAge: Int!) 
on QUERY + +directive @generate(query: GenerateQueryParams, mutation: GenerateMutationParams, subscription: Boolean) on OBJECT | INTERFACE + +directive @id(interface: Boolean) on FIELD_DEFINITION + +directive @withSubscription on OBJECT | INTERFACE | FIELD_DEFINITION + +directive @remote on OBJECT | INTERFACE | UNION | INPUT_OBJECT | ENUM + +directive @cascade(fields: [String]) on FIELD + +directive @lambda on FIELD_DEFINITION + +input AddApplicationDeploymentInput { + """id is randomly assigned""" + id: String! + artifact: [ArtifactRef!] + applicationEnvironment: ApplicationEnvironmentRef! + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage! + + """source is argo, spinnaker etc""" + source: String! + + """component would be a service""" + component: String! + + """user who deployed the artifact""" + deployedBy: String + toolsUsed: ToolsUsedRef! + deploymentRisk: ApplicationDeploymentRiskRef + policyRunHistory: [RunHistoryRef!] +} + +type AddApplicationDeploymentPayload { + applicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + numUids: Int +} + +input AddApplicationDeploymentRiskInput { + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment: ApplicationDeploymentRef! +} + +type AddApplicationDeploymentRiskPayload { + applicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + numUids: Int +} + +input AddApplicationEnvironmentInput { + """id is randomly assigned""" + id: String! + environment: EnvironmentRef + application: ApplicationRef! + deploymentTarget: DeploymentTargetRef! + namespace: String! 
+ + """ + toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + """ + toolsUsed: [String!] + deployments: [ApplicationDeploymentRef!] + riskStatus: ApplicationRiskStatusRef + metadata: [KeyValueRef!] +} + +type AddApplicationEnvironmentPayload { + applicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + numUids: Int +} + +input AddApplicationInput { + """id is randomly assigned""" + id: String! + name: String! + roles: [RoleRef!] + environments: [ApplicationEnvironmentRef!] + team: TeamRef! + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + metadata: [KeyValueRef!] +} + +type AddApplicationPayload { + application(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + numUids: Int +} + +input AddApplicationRiskStatusInput { + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime! + updatedAt: DateTime! + applicationEnvironment: ApplicationEnvironmentRef! +} + +type AddApplicationRiskStatusPayload { + applicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + numUids: Int +} + +input AddArtifactInput { + id: String! + artifactType: String! + artifactName: String! + artifactTag: String! + artifactSha: String! + scanData: [ArtifactScanDataRef!] + artifactDeployment: [ApplicationDeploymentRef!] + buildDetails: BuildToolRef +} + +type AddArtifactPayload { + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + numUids: Int +} + +input AddArtifactScanDataInput { + id: String! + artifactSha: String! + tool: String! 
+ artifactDetails: ArtifactRef + lastScannedAt: DateTime + createdAt: DateTime + vulnTrackingId: String + components: [ComponentRef!] + vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int + sbomUrl: String + artifactLicenseScanUrl: String + artifactSecretScanUrl: String + sourceLicenseScanUrl: String + sourceSecretScanUrl: String + sourceScorecardScanUrl: String + sourceSemgrepHighSeverityScanUrl: String + sourceSemgrepMediumSeverityScanUrl: String + sourceSemgrepLowSeverityScanUrl: String + sourceSnykScanUrl: String + virusTotalUrlScan: String + riskStatus: RiskStatus + artifactRunHistory: [RunHistoryRef!] +} + +type AddArtifactScanDataPayload { + artifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + numUids: Int +} + +input AddBuildToolInput { + """id is randomly assigned""" + id: String! + + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String! + + """tool is jenkins etc""" + tool: String! + + """buildName is the name of the job/pipeline/action""" + buildName: String! + buildUrl: String! + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String! + + """artifactTag would be the tag of the artifact""" + artifactTag: String! + + """digest is the sha of the artifact""" + digest: String! + + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + artifactNode: ArtifactRef + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + sourceCodeTool: SourceCodeToolRef + commitMetaData: [CommitMetaDataRef!] + createdAt: DateTime! 
+} + +type AddBuildToolPayload { + buildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + numUids: Int +} + +input AddCommitMetaDataInput { + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool: BuildToolRef! +} + +type AddCommitMetaDataPayload { + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + numUids: Int +} + +input AddComponentInput { + id: String! + type: String! + name: String! + version: String! + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities: [VulnerabilityRef!] + artifacts: [ArtifactScanDataRef!] +} + +type AddComponentPayload { + component(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + numUids: Int +} + +input AddCredentialsInput { + data: String! + integrator: IntegratorRef! +} + +type AddCredentialsPayload { + credentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + numUids: Int +} + +input AddCWEInput { + id: String! + name: String! + description: String +} + +type AddCWEPayload { + cWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + numUids: Int +} + +input AddDeploymentTargetInput { + """id is randomly assigned""" + id: String! + name: String! + + """this would be the ip/server address of the target environment""" + ip: String! + account: String + + """this would be something like aws, gcp etc""" + targetType: String + + """this would be something like us-east-1 etc""" + region: String + kubescapeServiceConnected: String + isFirewall: Boolean + organization: OrganizationRef! + defaultEnvironment: EnvironmentRef! 
+} + +type AddDeploymentTargetPayload { + deploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + numUids: Int +} + +input AddEnvironmentInput { + id: String! + organization: OrganizationRef! + purpose: String! +} + +type AddEnvironmentPayload { + environment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + numUids: Int +} + +input AddFeatureModeInput { + id: String! + organization: OrganizationRef! + scan: String! + type: String! + enabled: Boolean! + category: String! + createdAt: DateTime! + updatedAt: DateTime! +} + +type AddFeatureModePayload { + featureMode(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode] + numUids: Int +} + +input AddIntegratorInput { + id: String! + organization: OrganizationRef! + name: String! + type: String! + category: String! + credentials: CredentialsRef! + createdAt: DateTime! + updatedAt: DateTime! +} + +type AddIntegratorPayload { + integrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + numUids: Int +} + +input AddKeyValueInput { + id: String! + name: String! + value: String! +} + +type AddKeyValuePayload { + keyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + numUids: Int +} + +input AddOrganizationInput { + """id is randomly assigned""" + id: String! + name: String! + roles: [RoleRef!] + teams: [TeamRef!] + environments: [DeploymentTargetRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + integrators: [IntegratorRef!] + featureModes: [FeatureModeRef!] +} + +type AddOrganizationPayload { + organization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + numUids: Int +} + +input AddPolicyDefinitionInput { + id: String! + ownerOrg: OrganizationRef! 
+ ownerTeam: TeamRef + ownerApplication: ApplicationRef + createdAt: DateTime! + updatedAt: DateTime! + policyName: String! + category: String! + stage: String! + description: String! + scheduledPolicy: Boolean! + script: String! + variables: String + conditionName: String + suggestion: String +} + +type AddPolicyDefinitionPayload { + policyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + numUids: Int +} + +input AddPolicyEnforcementInput { + policy: PolicyDefinitionRef! + enforcedOrg: OrganizationRef + enforcedTeam: TeamRef + enforcedApplication: ApplicationRef + status: Boolean! + forceApply: Boolean + severity: Severity! + datasourceTool: String! + action: String! + conditionValue: String + environments: [EnvironmentRef!] + tags: [TagRef!] + createdAt: DateTime! + updatedAt: DateTime! +} + +type AddPolicyEnforcementPayload { + policyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + numUids: Int +} + +input AddRoleInput { + """id is randomly assigned""" + id: String! + + """group should be a URI format that includes a scope or realm""" + group: String! + permission: RolePermission! +} + +type AddRolePayload { + role(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + numUids: Int +} + +input AddRunHistoryInput { + policyId: String! + applicationDeployment: ApplicationDeploymentRef + artifactScan: ArtifactScanDataRef + PolicyName: String! + Stage: String! + Artifact: String! + ArtifactTag: String! + ArtifactSha: String! + ArtifactNameTag: String! + DatasourceTool: String! + CreatedAt: DateTime! + UpdatedAt: DateTime! + DeployedAt: DateTime! + Hash: String + Pass: Boolean! + MetaData: String + FileApi: String + scheduledPolicy: Boolean! + policyEnforcements: PolicyEnforcementRef! 
+ securityIssue: SecurityIssueRef +} + +type AddRunHistoryPayload { + runHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + numUids: Int +} + +input AddSchemaVersionInput { + version: String! +} + +type AddSchemaVersionPayload { + schemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + numUids: Int +} + +input AddSecurityIssueInput { + AlertTitle: String + AlertMessage: String + Suggestions: String + Severity: Severity! + CreatedAt: DateTime! + UpdatedAt: DateTime! + Action: String! + JiraUrl: String + Status: String! + Reason: String + Error: String + Affects: [RunHistoryRef!] +} + +type AddSecurityIssuePayload { + securityIssue(filter: SecurityIssueFilter, order: SecurityIssueOrder, first: Int, offset: Int): [SecurityIssue] + numUids: Int +} + +input AddSourceCodeToolInput { + """id is randomly assigned""" + id: String! + createdAt: DateTime! + + """scm is the scm tool github/gitlab etc""" + scm: String! + + """repository is the git remote repository""" + repository: String! + + """branch is the git branch on which the artifact was built""" + branch: String! + + """headCommit is the checkout out head commit""" + headCommit: String + + """ + diffCommits is a comma separated string of the commits between the previous built artifact and the current + """ + diffCommits: String + licenseName: String + visibility: String + workflowName: String + + """parentRepo is populated in case the git repo is a fork""" + parentRepo: String + buildTool: BuildToolRef! +} + +type AddSourceCodeToolPayload { + sourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + numUids: Int +} + +input AddTagInput { + id: String! + tagName: String! + tagValue: String! + tagDescription: String + createdBy: String + createdAt: DateTime! + updatedAt: DateTime! + policies: [PolicyEnforcementRef!] 
+} + +type AddTagPayload { + tag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + numUids: Int +} + +input AddTeamInput { + """id is randomly assigned""" + id: String! + name: String! + roles: [RoleRef!] + organization: OrganizationRef! + applications: [ApplicationRef!] + labels: [KeyValueRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] +} + +type AddTeamPayload { + team(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + numUids: Int +} + +input AddToolsUsedInput { + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +type AddToolsUsedPayload { + toolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + numUids: Int +} + +input AddVulnerabilityInput { + id: String! + parent: String! + ratings: Severity + cwes: [CWERef!] + summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: Float + priority: String + epss: Float + cisa_kev: String + affects: [ComponentRef!] +} + +type AddVulnerabilityPayload { + vulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + numUids: Int +} + +type Application implements RBAC { + """id is randomly assigned""" + id: String! + name: String! + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + environments(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment!] + team(filter: TeamFilter): Team! + policies(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition!] + policyEnforcements(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] + metadata(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue!] 
+ rolesAggregate(filter: RoleFilter): RoleAggregateResult + environmentsAggregate(filter: ApplicationEnvironmentFilter): ApplicationEnvironmentAggregateResult + policiesAggregate(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + policyEnforcementsAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult + metadataAggregate(filter: KeyValueFilter): KeyValueAggregateResult +} + +type ApplicationAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String +} + +""" +ApplicationDeployment tells us about the the artifact deployed along with its associated details. +""" +type ApplicationDeployment { + """id is randomly assigned""" + id: String! + + """artifact that is deployed""" + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact!] + applicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironment! + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage! + + """source is argo, spinnaker etc""" + source: String! + + """component would be a service""" + component: String! + + """user who deployed the artifact""" + deployedBy: String + + """ + toolsUsed contains tools of different stages of source, build, artifact and deploy along with some different tools + """ + toolsUsed(filter: ToolsUsedFilter): ToolsUsed! + + """deploymentRisk is the risk status of the deployment""" + deploymentRisk(filter: ApplicationDeploymentRiskFilter): ApplicationDeploymentRisk + + """policyRunHistory is the policy execution history for this deployment""" + policyRunHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory!] 
+ artifactAggregate(filter: ArtifactFilter): ArtifactAggregateResult + policyRunHistoryAggregate(filter: RunHistoryFilter): RunHistoryAggregateResult +} + +type ApplicationDeploymentAggregateResult { + count: Int + idMin: String + idMax: String + deployedAtMin: DateTime + deployedAtMax: DateTime + sourceMin: String + sourceMax: String + componentMin: String + componentMax: String + deployedByMin: String + deployedByMax: String +} + +input ApplicationDeploymentFilter { + id: StringHashFilter + deployedAt: DateTimeFilter + deploymentStage: DeploymentStage_exact + component: StringExactFilter_StringRegExpFilter + has: [ApplicationDeploymentHasFilter] + and: [ApplicationDeploymentFilter] + or: [ApplicationDeploymentFilter] + not: ApplicationDeploymentFilter +} + +enum ApplicationDeploymentHasFilter { + id + artifact + applicationEnvironment + deployedAt + deploymentStage + source + component + deployedBy + toolsUsed + deploymentRisk + policyRunHistory +} + +input ApplicationDeploymentOrder { + asc: ApplicationDeploymentOrderable + desc: ApplicationDeploymentOrderable + then: ApplicationDeploymentOrder +} + +enum ApplicationDeploymentOrderable { + id + deployedAt + source + component + deployedBy +} + +input ApplicationDeploymentPatch { + artifact: [ArtifactRef!] + applicationEnvironment: ApplicationEnvironmentRef + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage + + """source is argo, spinnaker etc""" + source: String + + """component would be a service""" + component: String + + """user who deployed the artifact""" + deployedBy: String + toolsUsed: ToolsUsedRef + deploymentRisk: ApplicationDeploymentRiskRef + policyRunHistory: [RunHistoryRef!] +} + +input ApplicationDeploymentRef { + """id is randomly assigned""" + id: String + artifact: [ArtifactRef!] 
+ applicationEnvironment: ApplicationEnvironmentRef + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage + + """source is argo, spinnaker etc""" + source: String + + """component would be a service""" + component: String + + """user who deployed the artifact""" + deployedBy: String + toolsUsed: ToolsUsedRef + deploymentRisk: ApplicationDeploymentRiskRef + policyRunHistory: [RunHistoryRef!] +} + +""" +ApplicationDeploymentRisk tells us about the risk status and alerts for different stages for an application deployment. +""" +type ApplicationDeploymentRisk { + id: ID! + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment(filter: ApplicationDeploymentFilter): ApplicationDeployment! +} + +type ApplicationDeploymentRiskAggregateResult { + count: Int + sourceCodeAlertsScoreMin: Int + sourceCodeAlertsScoreMax: Int + sourceCodeAlertsScoreSum: Int + sourceCodeAlertsScoreAvg: Float + buildAlertsScoreMin: Int + buildAlertsScoreMax: Int + buildAlertsScoreSum: Int + buildAlertsScoreAvg: Float + artifactAlertsScoreMin: Int + artifactAlertsScoreMax: Int + artifactAlertsScoreSum: Int + artifactAlertsScoreAvg: Float + deploymentAlertsScoreMin: Int + deploymentAlertsScoreMax: Int + deploymentAlertsScoreSum: Int + deploymentAlertsScoreAvg: Float +} + +input ApplicationDeploymentRiskFilter { + id: [ID!] 
+ deploymentRiskStatus: RiskStatus_exact_StringRegExpFilter + has: [ApplicationDeploymentRiskHasFilter] + and: [ApplicationDeploymentRiskFilter] + or: [ApplicationDeploymentRiskFilter] + not: ApplicationDeploymentRiskFilter +} + +enum ApplicationDeploymentRiskHasFilter { + sourceCodeAlertsScore + buildAlertsScore + artifactAlertsScore + deploymentAlertsScore + deploymentRiskStatus + applicationDeployment +} + +input ApplicationDeploymentRiskOrder { + asc: ApplicationDeploymentRiskOrderable + desc: ApplicationDeploymentRiskOrderable + then: ApplicationDeploymentRiskOrder +} + +enum ApplicationDeploymentRiskOrderable { + sourceCodeAlertsScore + buildAlertsScore + artifactAlertsScore + deploymentAlertsScore +} + +input ApplicationDeploymentRiskPatch { + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment: ApplicationDeploymentRef +} + +input ApplicationDeploymentRiskRef { + id: ID + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment: ApplicationDeploymentRef +} + +""" +ApplicationEnvironment is a running instance of an application down to the level of a namespace or its non k8s equivalent. +""" +type ApplicationEnvironment { + """id is randomly assigned""" + id: String! + + """environment denotes whether it is dev, prod, staging, non-prod etc""" + environment(filter: EnvironmentFilter): Environment + application(filter: ApplicationFilter): Application! + deploymentTarget(filter: DeploymentTargetFilter): DeploymentTarget! + namespace: String! + + """ + toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + """ + toolsUsed: [String!] + deployments(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment!] 
+ riskStatus(filter: ApplicationRiskStatusFilter): ApplicationRiskStatus + metadata(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue!] + deploymentsAggregate(filter: ApplicationDeploymentFilter): ApplicationDeploymentAggregateResult + metadataAggregate(filter: KeyValueFilter): KeyValueAggregateResult +} + +type ApplicationEnvironmentAggregateResult { + count: Int + idMin: String + idMax: String + namespaceMin: String + namespaceMax: String +} + +input ApplicationEnvironmentFilter { + id: StringHashFilter + namespace: StringExactFilter_StringRegExpFilter + has: [ApplicationEnvironmentHasFilter] + and: [ApplicationEnvironmentFilter] + or: [ApplicationEnvironmentFilter] + not: ApplicationEnvironmentFilter +} + +enum ApplicationEnvironmentHasFilter { + id + environment + application + deploymentTarget + namespace + toolsUsed + deployments + riskStatus + metadata +} + +input ApplicationEnvironmentOrder { + asc: ApplicationEnvironmentOrderable + desc: ApplicationEnvironmentOrderable + then: ApplicationEnvironmentOrder +} + +enum ApplicationEnvironmentOrderable { + id + namespace +} + +input ApplicationEnvironmentPatch { + environment: EnvironmentRef + application: ApplicationRef + deploymentTarget: DeploymentTargetRef + namespace: String + + """ + toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + """ + toolsUsed: [String!] + deployments: [ApplicationDeploymentRef!] + riskStatus: ApplicationRiskStatusRef + metadata: [KeyValueRef!] +} + +input ApplicationEnvironmentRef { + """id is randomly assigned""" + id: String + environment: EnvironmentRef + application: ApplicationRef + deploymentTarget: DeploymentTargetRef + namespace: String + + """ + toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + """ + toolsUsed: [String!] + deployments: [ApplicationDeploymentRef!] 
+ riskStatus: ApplicationRiskStatusRef + metadata: [KeyValueRef!] +} + +input ApplicationFilter { + id: StringHashFilter + name: StringExactFilter_StringRegExpFilter + has: [ApplicationHasFilter] + and: [ApplicationFilter] + or: [ApplicationFilter] + not: ApplicationFilter +} + +enum ApplicationHasFilter { + id + name + roles + environments + team + policies + policyEnforcements + metadata +} + +input ApplicationOrder { + asc: ApplicationOrderable + desc: ApplicationOrderable + then: ApplicationOrder +} + +enum ApplicationOrderable { + id + name +} + +input ApplicationPatch { + name: String + roles: [RoleRef!] + environments: [ApplicationEnvironmentRef!] + team: TeamRef + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + metadata: [KeyValueRef!] +} + +input ApplicationRef { + """id is randomly assigned""" + id: String + name: String + roles: [RoleRef!] + environments: [ApplicationEnvironmentRef!] + team: TeamRef + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + metadata: [KeyValueRef!] +} + +""" +ApplicationRiskStatus tells us about the risk status and alerts for different stages for an application environment. +""" +type ApplicationRiskStatus { + id: ID! + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime! + updatedAt: DateTime! + applicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironment! 
+} + +type ApplicationRiskStatusAggregateResult { + count: Int + sourceCodeAlertsMin: Int + sourceCodeAlertsMax: Int + sourceCodeAlertsSum: Int + sourceCodeAlertsAvg: Float + buildAlertsMin: Int + buildAlertsMax: Int + buildAlertsSum: Int + buildAlertsAvg: Float + artifactAlertsMin: Int + artifactAlertsMax: Int + artifactAlertsSum: Int + artifactAlertsAvg: Float + deploymentAlertsMin: Int + deploymentAlertsMax: Int + deploymentAlertsSum: Int + deploymentAlertsAvg: Float + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input ApplicationRiskStatusFilter { + id: [ID!] + riskStatus: RiskStatus_exact_StringRegExpFilter + has: [ApplicationRiskStatusHasFilter] + and: [ApplicationRiskStatusFilter] + or: [ApplicationRiskStatusFilter] + not: ApplicationRiskStatusFilter +} + +enum ApplicationRiskStatusHasFilter { + riskStatus + sourceCodeAlerts + buildAlerts + artifactAlerts + deploymentAlerts + createdAt + updatedAt + applicationEnvironment +} + +input ApplicationRiskStatusOrder { + asc: ApplicationRiskStatusOrderable + desc: ApplicationRiskStatusOrderable + then: ApplicationRiskStatusOrder +} + +enum ApplicationRiskStatusOrderable { + sourceCodeAlerts + buildAlerts + artifactAlerts + deploymentAlerts + createdAt + updatedAt +} + +input ApplicationRiskStatusPatch { + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime + updatedAt: DateTime + applicationEnvironment: ApplicationEnvironmentRef +} + +input ApplicationRiskStatusRef { + id: ID + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime + updatedAt: DateTime + applicationEnvironment: ApplicationEnvironmentRef +} + +type Artifact { + id: String! + artifactType: String! + artifactName: String! + artifactTag: String! + artifactSha: String! 
+ scanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData!] + artifactDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment!] + buildDetails(filter: BuildToolFilter): BuildTool + scanDataAggregate(filter: ArtifactScanDataFilter): ArtifactScanDataAggregateResult + artifactDeploymentAggregate(filter: ApplicationDeploymentFilter): ApplicationDeploymentAggregateResult +} + +type ArtifactAggregateResult { + count: Int + idMin: String + idMax: String + artifactTypeMin: String + artifactTypeMax: String + artifactNameMin: String + artifactNameMax: String + artifactTagMin: String + artifactTagMax: String + artifactShaMin: String + artifactShaMax: String +} + +input ArtifactFilter { + id: StringHashFilter + artifactType: StringExactFilter + artifactName: StringExactFilter_StringRegExpFilter + artifactTag: StringExactFilter_StringRegExpFilter + artifactSha: StringExactFilter + has: [ArtifactHasFilter] + and: [ArtifactFilter] + or: [ArtifactFilter] + not: ArtifactFilter +} + +enum ArtifactHasFilter { + id + artifactType + artifactName + artifactTag + artifactSha + scanData + artifactDeployment + buildDetails +} + +input ArtifactOrder { + asc: ArtifactOrderable + desc: ArtifactOrderable + then: ArtifactOrder +} + +enum ArtifactOrderable { + id + artifactType + artifactName + artifactTag + artifactSha +} + +input ArtifactPatch { + artifactType: String + artifactName: String + artifactTag: String + artifactSha: String + scanData: [ArtifactScanDataRef!] + artifactDeployment: [ApplicationDeploymentRef!] + buildDetails: BuildToolRef +} + +input ArtifactRef { + id: String + artifactType: String + artifactName: String + artifactTag: String + artifactSha: String + scanData: [ArtifactScanDataRef!] + artifactDeployment: [ApplicationDeploymentRef!] + buildDetails: BuildToolRef +} + +type ArtifactScanData { + id: String! + artifactSha: String! 
+ tool: String! + artifactDetails(filter: ArtifactFilter): Artifact + lastScannedAt: DateTime + createdAt: DateTime + vulnTrackingId: String + components(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component!] + vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int + sbomUrl: String + artifactLicenseScanUrl: String + artifactSecretScanUrl: String + sourceLicenseScanUrl: String + sourceSecretScanUrl: String + sourceScorecardScanUrl: String + sourceSemgrepHighSeverityScanUrl: String + sourceSemgrepMediumSeverityScanUrl: String + sourceSemgrepLowSeverityScanUrl: String + sourceSnykScanUrl: String + virusTotalUrlScan: String + riskStatus: RiskStatus + artifactRunHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory!] + componentsAggregate(filter: ComponentFilter): ComponentAggregateResult + artifactRunHistoryAggregate(filter: RunHistoryFilter): RunHistoryAggregateResult +} + +type ArtifactScanDataAggregateResult { + count: Int + idMin: String + idMax: String + artifactShaMin: String + artifactShaMax: String + toolMin: String + toolMax: String + lastScannedAtMin: DateTime + lastScannedAtMax: DateTime + createdAtMin: DateTime + createdAtMax: DateTime + vulnTrackingIdMin: String + vulnTrackingIdMax: String + vulnCriticalCountMin: Int + vulnCriticalCountMax: Int + vulnCriticalCountSum: Int + vulnCriticalCountAvg: Float + vulnHighCountMin: Int + vulnHighCountMax: Int + vulnHighCountSum: Int + vulnHighCountAvg: Float + vulnMediumCountMin: Int + vulnMediumCountMax: Int + vulnMediumCountSum: Int + vulnMediumCountAvg: Float + vulnLowCountMin: Int + vulnLowCountMax: Int + vulnLowCountSum: Int + vulnLowCountAvg: Float + vulnInfoCountMin: Int + vulnInfoCountMax: Int + vulnInfoCountSum: Int + vulnInfoCountAvg: Float + vulnUnknownCountMin: Int + vulnUnknownCountMax: Int + vulnUnknownCountSum: 
Int + vulnUnknownCountAvg: Float + vulnNoneCountMin: Int + vulnNoneCountMax: Int + vulnNoneCountSum: Int + vulnNoneCountAvg: Float + vulnTotalCountMin: Int + vulnTotalCountMax: Int + vulnTotalCountSum: Int + vulnTotalCountAvg: Float + sbomUrlMin: String + sbomUrlMax: String + artifactLicenseScanUrlMin: String + artifactLicenseScanUrlMax: String + artifactSecretScanUrlMin: String + artifactSecretScanUrlMax: String + sourceLicenseScanUrlMin: String + sourceLicenseScanUrlMax: String + sourceSecretScanUrlMin: String + sourceSecretScanUrlMax: String + sourceScorecardScanUrlMin: String + sourceScorecardScanUrlMax: String + sourceSemgrepHighSeverityScanUrlMin: String + sourceSemgrepHighSeverityScanUrlMax: String + sourceSemgrepMediumSeverityScanUrlMin: String + sourceSemgrepMediumSeverityScanUrlMax: String + sourceSemgrepLowSeverityScanUrlMin: String + sourceSemgrepLowSeverityScanUrlMax: String + sourceSnykScanUrlMin: String + sourceSnykScanUrlMax: String + virusTotalUrlScanMin: String + virusTotalUrlScanMax: String +} + +input ArtifactScanDataFilter { + id: StringHashFilter + artifactSha: StringExactFilter + tool: StringExactFilter + vulnCriticalCount: IntFilter + vulnHighCount: IntFilter + vulnMediumCount: IntFilter + vulnLowCount: IntFilter + vulnInfoCount: IntFilter + vulnUnknownCount: IntFilter + vulnNoneCount: IntFilter + vulnTotalCount: IntFilter + riskStatus: RiskStatus_exact_StringRegExpFilter + has: [ArtifactScanDataHasFilter] + and: [ArtifactScanDataFilter] + or: [ArtifactScanDataFilter] + not: ArtifactScanDataFilter +} + +enum ArtifactScanDataHasFilter { + id + artifactSha + tool + artifactDetails + lastScannedAt + createdAt + vulnTrackingId + components + vulnCriticalCount + vulnHighCount + vulnMediumCount + vulnLowCount + vulnInfoCount + vulnUnknownCount + vulnNoneCount + vulnTotalCount + sbomUrl + artifactLicenseScanUrl + artifactSecretScanUrl + sourceLicenseScanUrl + sourceSecretScanUrl + sourceScorecardScanUrl + sourceSemgrepHighSeverityScanUrl + 
sourceSemgrepMediumSeverityScanUrl + sourceSemgrepLowSeverityScanUrl + sourceSnykScanUrl + virusTotalUrlScan + riskStatus + artifactRunHistory +} + +input ArtifactScanDataOrder { + asc: ArtifactScanDataOrderable + desc: ArtifactScanDataOrderable + then: ArtifactScanDataOrder +} + +enum ArtifactScanDataOrderable { + id + artifactSha + tool + lastScannedAt + createdAt + vulnTrackingId + vulnCriticalCount + vulnHighCount + vulnMediumCount + vulnLowCount + vulnInfoCount + vulnUnknownCount + vulnNoneCount + vulnTotalCount + sbomUrl + artifactLicenseScanUrl + artifactSecretScanUrl + sourceLicenseScanUrl + sourceSecretScanUrl + sourceScorecardScanUrl + sourceSemgrepHighSeverityScanUrl + sourceSemgrepMediumSeverityScanUrl + sourceSemgrepLowSeverityScanUrl + sourceSnykScanUrl + virusTotalUrlScan +} + +input ArtifactScanDataPatch { + artifactSha: String + tool: String + artifactDetails: ArtifactRef + lastScannedAt: DateTime + createdAt: DateTime + vulnTrackingId: String + components: [ComponentRef!] + vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int + sbomUrl: String + artifactLicenseScanUrl: String + artifactSecretScanUrl: String + sourceLicenseScanUrl: String + sourceSecretScanUrl: String + sourceScorecardScanUrl: String + sourceSemgrepHighSeverityScanUrl: String + sourceSemgrepMediumSeverityScanUrl: String + sourceSemgrepLowSeverityScanUrl: String + sourceSnykScanUrl: String + virusTotalUrlScan: String + riskStatus: RiskStatus + artifactRunHistory: [RunHistoryRef!] +} + +input ArtifactScanDataRef { + id: String + artifactSha: String + tool: String + artifactDetails: ArtifactRef + lastScannedAt: DateTime + createdAt: DateTime + vulnTrackingId: String + components: [ComponentRef!] 
+ vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int + sbomUrl: String + artifactLicenseScanUrl: String + artifactSecretScanUrl: String + sourceLicenseScanUrl: String + sourceSecretScanUrl: String + sourceScorecardScanUrl: String + sourceSemgrepHighSeverityScanUrl: String + sourceSemgrepMediumSeverityScanUrl: String + sourceSemgrepLowSeverityScanUrl: String + sourceSnykScanUrl: String + virusTotalUrlScan: String + riskStatus: RiskStatus + artifactRunHistory: [RunHistoryRef!] +} + +input AuthRule { + and: [AuthRule] + or: [AuthRule] + not: AuthRule + rule: String +} + +"""BuildTool contains data from build tool events.""" +type BuildTool { + """id is randomly assigned""" + id: String! + + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String! + + """tool is jenkins etc""" + tool: String! + + """buildName is the name of the job/pipeline/action""" + buildName: String! + buildUrl: String! + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String! + + """artifactTag would be the tag of the artifact""" + artifactTag: String! + + """digest is the sha of the artifact""" + digest: String! 
+ + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + + """artifactNode links a BuildTool node to an artifact""" + artifactNode(filter: ArtifactFilter): Artifact + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + + """sourceCodeTool links a BuildTool node to the source details""" + sourceCodeTool(filter: SourceCodeToolFilter): SourceCodeTool + + """commitMetaData links a BuildTool node to the git commit based details""" + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData!] + createdAt: DateTime! + commitMetaDataAggregate(filter: CommitMetaDataFilter): CommitMetaDataAggregateResult +} + +type BuildToolAggregateResult { + count: Int + idMin: String + idMax: String + buildIdMin: String + buildIdMax: String + toolMin: String + toolMax: String + buildNameMin: String + buildNameMax: String + buildUrlMin: String + buildUrlMax: String + artifactTypeMin: String + artifactTypeMax: String + artifactMin: String + artifactMax: String + artifactTagMin: String + artifactTagMax: String + digestMin: String + digestMax: String + buildDigestMin: String + buildDigestMax: String + buildTimeMin: DateTime + buildTimeMax: DateTime + buildUserMin: String + buildUserMax: String + createdAtMin: DateTime + createdAtMax: DateTime +} + +input BuildToolFilter { + id: StringHashFilter + buildId: StringExactFilter_StringRegExpFilter + tool: StringExactFilter + buildName: StringExactFilter_StringRegExpFilter + buildUrl: StringExactFilter + artifactType: StringExactFilter + artifact: StringExactFilter + artifactTag: StringExactFilter + digest: StringExactFilter + buildDigest: StringExactFilter + has: [BuildToolHasFilter] + and: [BuildToolFilter] + or: [BuildToolFilter] + not: BuildToolFilter +} + +enum BuildToolHasFilter { + id + buildId + tool + buildName + buildUrl + 
artifactType + artifact + artifactTag + digest + buildDigest + artifactNode + buildTime + buildUser + sourceCodeTool + commitMetaData + createdAt +} + +input BuildToolOrder { + asc: BuildToolOrderable + desc: BuildToolOrderable + then: BuildToolOrder +} + +enum BuildToolOrderable { + id + buildId + tool + buildName + buildUrl + artifactType + artifact + artifactTag + digest + buildDigest + buildTime + buildUser + createdAt +} + +input BuildToolPatch { + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String + + """tool is jenkins etc""" + tool: String + + """buildName is the name of the job/pipeline/action""" + buildName: String + buildUrl: String + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String + + """artifactTag would be the tag of the artifact""" + artifactTag: String + + """digest is the sha of the artifact""" + digest: String + + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + artifactNode: ArtifactRef + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + sourceCodeTool: SourceCodeToolRef + commitMetaData: [CommitMetaDataRef!] 
+ createdAt: DateTime +} + +input BuildToolRef { + """id is randomly assigned""" + id: String + + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String + + """tool is jenkins etc""" + tool: String + + """buildName is the name of the job/pipeline/action""" + buildName: String + buildUrl: String + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String + + """artifactTag would be the tag of the artifact""" + artifactTag: String + + """digest is the sha of the artifact""" + digest: String + + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + artifactNode: ArtifactRef + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + sourceCodeTool: SourceCodeToolRef + commitMetaData: [CommitMetaDataRef!] + createdAt: DateTime +} + +""" +CommitMetaData contains the git commit related details of the source repository . +""" +type CommitMetaData { + """id is randomly assigned""" + id: ID! + + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool(filter: BuildToolFilter): BuildTool! +} + +type CommitMetaDataAggregateResult { + count: Int + commitMin: String + commitMax: String + repositoryMin: String + repositoryMax: String + noOfReviewersConfMin: Int + noOfReviewersConfMax: Int + noOfReviewersConfSum: Int + noOfReviewersConfAvg: Float +} + +input CommitMetaDataFilter { + id: [ID!] 
+ has: [CommitMetaDataHasFilter] + and: [CommitMetaDataFilter] + or: [CommitMetaDataFilter] + not: CommitMetaDataFilter +} + +enum CommitMetaDataHasFilter { + commit + repository + commitSign + noOfReviewersConf + reviewerList + approverList + buildTool +} + +input CommitMetaDataOrder { + asc: CommitMetaDataOrderable + desc: CommitMetaDataOrderable + then: CommitMetaDataOrder +} + +enum CommitMetaDataOrderable { + commit + repository + noOfReviewersConf +} + +input CommitMetaDataPatch { + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool: BuildToolRef +} + +input CommitMetaDataRef { + """id is randomly assigned""" + id: ID + + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool: BuildToolRef +} + +type Component { + id: String! + type: String! + name: String! + version: String! + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability!] + artifacts(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData!] 
+ vulnerabilitiesAggregate(filter: VulnerabilityFilter): VulnerabilityAggregateResult + artifactsAggregate(filter: ArtifactScanDataFilter): ArtifactScanDataAggregateResult +} + +type ComponentAggregateResult { + count: Int + idMin: String + idMax: String + typeMin: String + typeMax: String + nameMin: String + nameMax: String + versionMin: String + versionMax: String + purlMin: String + purlMax: String + cpeMin: String + cpeMax: String + scannedAtMin: DateTime + scannedAtMax: DateTime +} + +input ComponentFilter { + id: StringHashFilter + name: StringExactFilter_StringRegExpFilter + version: StringExactFilter_StringRegExpFilter + purl: StringExactFilter + cpe: StringExactFilter + has: [ComponentHasFilter] + and: [ComponentFilter] + or: [ComponentFilter] + not: ComponentFilter +} + +enum ComponentHasFilter { + id + type + name + version + licenses + purl + cpe + scannedAt + vulnerabilities + artifacts +} + +input ComponentOrder { + asc: ComponentOrderable + desc: ComponentOrderable + then: ComponentOrder +} + +enum ComponentOrderable { + id + type + name + version + purl + cpe + scannedAt +} + +input ComponentPatch { + type: String + name: String + version: String + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities: [VulnerabilityRef!] + artifacts: [ArtifactScanDataRef!] +} + +input ComponentRef { + id: String + type: String + name: String + version: String + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities: [VulnerabilityRef!] + artifacts: [ArtifactScanDataRef!] +} + +input ContainsFilter { + point: PointRef + polygon: PolygonRef +} + +type Credentials { + id: ID! + data: String! + integrator(filter: IntegratorFilter): Integrator! +} + +type CredentialsAggregateResult { + count: Int + dataMin: String + dataMax: String +} + +input CredentialsFilter { + id: [ID!] 
+ has: [CredentialsHasFilter] + and: [CredentialsFilter] + or: [CredentialsFilter] + not: CredentialsFilter +} + +enum CredentialsHasFilter { + data + integrator +} + +input CredentialsOrder { + asc: CredentialsOrderable + desc: CredentialsOrderable + then: CredentialsOrder +} + +enum CredentialsOrderable { + data +} + +input CredentialsPatch { + data: String + integrator: IntegratorRef +} + +input CredentialsRef { + id: ID + data: String + integrator: IntegratorRef +} + +input CustomHTTP { + url: String! + method: HTTPMethod! + body: String + graphql: String + mode: Mode + forwardHeaders: [String!] + secretHeaders: [String!] + introspectionHeaders: [String!] + skipIntrospection: Boolean +} + +type CWE { + id: String! + name: String! + description: String +} + +type CWEAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + descriptionMin: String + descriptionMax: String +} + +input CWEFilter { + id: StringHashFilter + has: [CWEHasFilter] + and: [CWEFilter] + or: [CWEFilter] + not: CWEFilter +} + +enum CWEHasFilter { + id + name + description +} + +input CWEOrder { + asc: CWEOrderable + desc: CWEOrderable + then: CWEOrder +} + +enum CWEOrderable { + id + name + description +} + +input CWEPatch { + name: String + description: String +} + +input CWERef { + id: String + name: String + description: String +} + +""" +The DateTime scalar type represents date and time as a string in RFC3339 format. +For example: "1985-04-12T23:20:50.52Z" represents 20 mins 50.52 secs after the 23rd hour of Apr 12th 1985 in UTC. +""" +scalar DateTime + +input DateTimeFilter { + eq: DateTime + in: [DateTime] + le: DateTime + lt: DateTime + ge: DateTime + gt: DateTime + between: DateTimeRange +} + +input DateTimeRange { + min: DateTime! + max: DateTime! 
+} + +type DeleteApplicationDeploymentPayload { + applicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + msg: String + numUids: Int +} + +type DeleteApplicationDeploymentRiskPayload { + applicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + msg: String + numUids: Int +} + +type DeleteApplicationEnvironmentPayload { + applicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + msg: String + numUids: Int +} + +type DeleteApplicationPayload { + application(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + msg: String + numUids: Int +} + +type DeleteApplicationRiskStatusPayload { + applicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + msg: String + numUids: Int +} + +type DeleteArtifactPayload { + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + msg: String + numUids: Int +} + +type DeleteArtifactScanDataPayload { + artifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + msg: String + numUids: Int +} + +type DeleteBuildToolPayload { + buildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + msg: String + numUids: Int +} + +type DeleteCommitMetaDataPayload { + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + msg: String + numUids: Int +} + +type DeleteComponentPayload { + component(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + msg: String + numUids: Int +} + +type DeleteCredentialsPayload { + 
credentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + msg: String + numUids: Int +} + +type DeleteCWEPayload { + cWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + msg: String + numUids: Int +} + +type DeleteDeploymentTargetPayload { + deploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + msg: String + numUids: Int +} + +type DeleteEnvironmentPayload { + environment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + msg: String + numUids: Int +} + +type DeleteFeatureModePayload { + featureMode(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode] + msg: String + numUids: Int +} + +type DeleteIntegratorPayload { + integrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + msg: String + numUids: Int +} + +type DeleteKeyValuePayload { + keyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + msg: String + numUids: Int +} + +type DeleteOrganizationPayload { + organization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + msg: String + numUids: Int +} + +type DeletePolicyDefinitionPayload { + policyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + msg: String + numUids: Int +} + +type DeletePolicyEnforcementPayload { + policyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + msg: String + numUids: Int +} + +type DeleteRBACPayload { + rBAC(filter: RBACFilter, first: Int, offset: Int): [RBAC] + msg: String + numUids: Int +} + +type DeleteRolePayload { + role(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + msg: String + numUids: Int +} + +type 
DeleteRunHistoryPayload { + runHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + msg: String + numUids: Int +} + +type DeleteSchemaVersionPayload { + schemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + msg: String + numUids: Int +} + +type DeleteSecurityIssuePayload { + securityIssue(filter: SecurityIssueFilter, order: SecurityIssueOrder, first: Int, offset: Int): [SecurityIssue] + msg: String + numUids: Int +} + +type DeleteSourceCodeToolPayload { + sourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + msg: String + numUids: Int +} + +type DeleteTagPayload { + tag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + msg: String + numUids: Int +} + +type DeleteTeamPayload { + team(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + msg: String + numUids: Int +} + +type DeleteToolsUsedPayload { + toolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + msg: String + numUids: Int +} + +type DeleteVulnerabilityPayload { + vulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + msg: String + numUids: Int +} + +"""DeploymentStage is an enum denoting the stage of the deployment. 
.""" +enum DeploymentStage { + """deployment is discovered from the events""" + discovered + + """scanning is under process""" + scanning + + """ + deployment is known to have passed the deployment firewall and the deployment(ie the artifact) is live + """ + current + + """ + deployment becomes a past deployment because another fresh deployment has happened + """ + previous + + """deployment is blocked by the firewall""" + blocked +} + +input DeploymentStage_exact { + eq: DeploymentStage + in: [DeploymentStage] + le: DeploymentStage + lt: DeploymentStage + ge: DeploymentStage + gt: DeploymentStage + between: DeploymentStage +} + +""" +DeploymentTarget describes a single place that things can be deployed into, +such as an AWS account or a Kubernetes cluster. +""" +type DeploymentTarget { + """id is randomly assigned""" + id: String! + name: String! + + """this would be the ip/server address of the target environment""" + ip: String! + account: String + + """this would be something like aws, gcp etc""" + targetType: String + + """this would be something like us-east-1 etc""" + region: String + kubescapeServiceConnected: String + isFirewall: Boolean + organization(filter: OrganizationFilter): Organization! + defaultEnvironment(filter: EnvironmentFilter): Environment! 
+} + +type DeploymentTargetAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + ipMin: String + ipMax: String + accountMin: String + accountMax: String + targetTypeMin: String + targetTypeMax: String + regionMin: String + regionMax: String + kubescapeServiceConnectedMin: String + kubescapeServiceConnectedMax: String +} + +input DeploymentTargetFilter { + id: StringHashFilter + name: StringExactFilter_StringRegExpFilter + ip: StringExactFilter + has: [DeploymentTargetHasFilter] + and: [DeploymentTargetFilter] + or: [DeploymentTargetFilter] + not: DeploymentTargetFilter +} + +enum DeploymentTargetHasFilter { + id + name + ip + account + targetType + region + kubescapeServiceConnected + isFirewall + organization + defaultEnvironment +} + +input DeploymentTargetOrder { + asc: DeploymentTargetOrderable + desc: DeploymentTargetOrderable + then: DeploymentTargetOrder +} + +enum DeploymentTargetOrderable { + id + name + ip + account + targetType + region + kubescapeServiceConnected +} + +input DeploymentTargetPatch { + name: String + + """this would be the ip/server address of the target environment""" + ip: String + account: String + + """this would be something like aws, gcp etc""" + targetType: String + + """this would be something like us-east-1 etc""" + region: String + kubescapeServiceConnected: String + isFirewall: Boolean + organization: OrganizationRef + defaultEnvironment: EnvironmentRef +} + +input DeploymentTargetRef { + """id is randomly assigned""" + id: String + name: String + + """this would be the ip/server address of the target environment""" + ip: String + account: String + + """this would be something like aws, gcp etc""" + targetType: String + + """this would be something like us-east-1 etc""" + region: String + kubescapeServiceConnected: String + isFirewall: Boolean + organization: OrganizationRef + defaultEnvironment: EnvironmentRef +} + +enum DgraphIndex { + int + int64 + float + bool + hash + exact + 
term + fulltext + trigram + regexp + year + month + day + hour + geo +} + +"""Environment can be things like dev, prod, staging etc.""" +type Environment { + id: String! + organization(filter: OrganizationFilter): Organization! + purpose: String! +} + +type EnvironmentAggregateResult { + count: Int + idMin: String + idMax: String + purposeMin: String + purposeMax: String +} + +input EnvironmentFilter { + id: StringHashFilter + purpose: StringExactFilter + has: [EnvironmentHasFilter] + and: [EnvironmentFilter] + or: [EnvironmentFilter] + not: EnvironmentFilter +} + +enum EnvironmentHasFilter { + id + organization + purpose +} + +input EnvironmentOrder { + asc: EnvironmentOrderable + desc: EnvironmentOrderable + then: EnvironmentOrder +} + +enum EnvironmentOrderable { + id + purpose +} + +input EnvironmentPatch { + organization: OrganizationRef + purpose: String +} + +input EnvironmentRef { + id: String + organization: OrganizationRef + purpose: String +} + +type FeatureMode { + id: String! + organization(filter: OrganizationFilter): Organization! + scan: String! + type: String! + enabled: Boolean! + category: String! + createdAt: DateTime! + updatedAt: DateTime! 
+} + +type FeatureModeAggregateResult { + count: Int + idMin: String + idMax: String + scanMin: String + scanMax: String + typeMin: String + typeMax: String + categoryMin: String + categoryMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input FeatureModeFilter { + id: StringHashFilter + scan: StringExactFilter + type: StringExactFilter + category: StringExactFilter + has: [FeatureModeHasFilter] + and: [FeatureModeFilter] + or: [FeatureModeFilter] + not: FeatureModeFilter +} + +enum FeatureModeHasFilter { + id + organization + scan + type + enabled + category + createdAt + updatedAt +} + +input FeatureModeOrder { + asc: FeatureModeOrderable + desc: FeatureModeOrderable + then: FeatureModeOrder +} + +enum FeatureModeOrderable { + id + scan + type + category + createdAt + updatedAt +} + +input FeatureModePatch { + organization: OrganizationRef + scan: String + type: String + enabled: Boolean + category: String + createdAt: DateTime + updatedAt: DateTime +} + +input FeatureModeRef { + id: String + organization: OrganizationRef + scan: String + type: String + enabled: Boolean + category: String + createdAt: DateTime + updatedAt: DateTime +} + +input FloatFilter { + eq: Float + in: [Float] + le: Float + lt: Float + ge: Float + gt: Float + between: FloatRange +} + +input FloatRange { + min: Float! + max: Float! +} + +input GenerateMutationParams { + add: Boolean + update: Boolean + delete: Boolean +} + +input GenerateQueryParams { + get: Boolean + query: Boolean + password: Boolean + aggregate: Boolean +} + +enum HTTPMethod { + GET + POST + PUT + PATCH + DELETE +} + +""" +The Int64 scalar type represents a signed 64‐bit numeric non‐fractional value. +Int64 can represent values in range [-(2^63),(2^63 - 1)]. +""" +scalar Int64 + +input Int64Filter { + eq: Int64 + in: [Int64] + le: Int64 + lt: Int64 + ge: Int64 + gt: Int64 + between: Int64Range +} + +input Int64Range { + min: Int64! + max: Int64! 
+} + +type Integrator { + id: String! + organization(filter: OrganizationFilter): Organization! + name: String! + type: String! + category: String! + credentials(filter: CredentialsFilter): Credentials! + createdAt: DateTime! + updatedAt: DateTime! +} + +type IntegratorAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + typeMin: String + typeMax: String + categoryMin: String + categoryMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input IntegratorFilter { + id: StringHashFilter + name: StringExactFilter + type: StringExactFilter + category: StringExactFilter + has: [IntegratorHasFilter] + and: [IntegratorFilter] + or: [IntegratorFilter] + not: IntegratorFilter +} + +enum IntegratorHasFilter { + id + organization + name + type + category + credentials + createdAt + updatedAt +} + +input IntegratorOrder { + asc: IntegratorOrderable + desc: IntegratorOrderable + then: IntegratorOrder +} + +enum IntegratorOrderable { + id + name + type + category + createdAt + updatedAt +} + +input IntegratorPatch { + organization: OrganizationRef + name: String + type: String + category: String + credentials: CredentialsRef + createdAt: DateTime + updatedAt: DateTime +} + +input IntegratorRef { + id: String + organization: OrganizationRef + name: String + type: String + category: String + credentials: CredentialsRef + createdAt: DateTime + updatedAt: DateTime +} + +input IntersectsFilter { + polygon: PolygonRef + multiPolygon: MultiPolygonRef +} + +input IntFilter { + eq: Int + in: [Int] + le: Int + lt: Int + ge: Int + gt: Int + between: IntRange +} + +input IntRange { + min: Int! + max: Int! +} + +""" +KeyValue is a generic key/value pair, used as an attribute list or similar. +""" +type KeyValue { + id: String! + name: String! + value: String! 
+} + +type KeyValueAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + valueMin: String + valueMax: String +} + +input KeyValueFilter { + id: StringHashFilter + has: [KeyValueHasFilter] + and: [KeyValueFilter] + or: [KeyValueFilter] + not: KeyValueFilter +} + +enum KeyValueHasFilter { + id + name + value +} + +input KeyValueOrder { + asc: KeyValueOrderable + desc: KeyValueOrderable + then: KeyValueOrder +} + +enum KeyValueOrderable { + id + name + value +} + +input KeyValuePatch { + name: String + value: String +} + +input KeyValueRef { + id: String + name: String + value: String +} + +enum Mode { + BATCH + SINGLE +} + +type MultiPolygon { + polygons: [Polygon!]! +} + +input MultiPolygonRef { + polygons: [PolygonRef!]! +} + +type Mutation { + addSchemaVersion(input: [AddSchemaVersionInput!]!): AddSchemaVersionPayload + updateSchemaVersion(input: UpdateSchemaVersionInput!): UpdateSchemaVersionPayload + deleteSchemaVersion(filter: SchemaVersionFilter!): DeleteSchemaVersionPayload + updateRBAC(input: UpdateRBACInput!): UpdateRBACPayload + deleteRBAC(filter: RBACFilter!): DeleteRBACPayload + addRole(input: [AddRoleInput!]!, upsert: Boolean): AddRolePayload + updateRole(input: UpdateRoleInput!): UpdateRolePayload + deleteRole(filter: RoleFilter!): DeleteRolePayload + addKeyValue(input: [AddKeyValueInput!]!, upsert: Boolean): AddKeyValuePayload + updateKeyValue(input: UpdateKeyValueInput!): UpdateKeyValuePayload + deleteKeyValue(filter: KeyValueFilter!): DeleteKeyValuePayload + addOrganization(input: [AddOrganizationInput!]!, upsert: Boolean): AddOrganizationPayload + updateOrganization(input: UpdateOrganizationInput!): UpdateOrganizationPayload + deleteOrganization(filter: OrganizationFilter!): DeleteOrganizationPayload + addEnvironment(input: [AddEnvironmentInput!]!, upsert: Boolean): AddEnvironmentPayload + updateEnvironment(input: UpdateEnvironmentInput!): UpdateEnvironmentPayload + deleteEnvironment(filter: 
EnvironmentFilter!): DeleteEnvironmentPayload + addDeploymentTarget(input: [AddDeploymentTargetInput!]!, upsert: Boolean): AddDeploymentTargetPayload + updateDeploymentTarget(input: UpdateDeploymentTargetInput!): UpdateDeploymentTargetPayload + deleteDeploymentTarget(filter: DeploymentTargetFilter!): DeleteDeploymentTargetPayload + addTeam(input: [AddTeamInput!]!, upsert: Boolean): AddTeamPayload + updateTeam(input: UpdateTeamInput!): UpdateTeamPayload + deleteTeam(filter: TeamFilter!): DeleteTeamPayload + addApplication(input: [AddApplicationInput!]!, upsert: Boolean): AddApplicationPayload + updateApplication(input: UpdateApplicationInput!): UpdateApplicationPayload + deleteApplication(filter: ApplicationFilter!): DeleteApplicationPayload + addApplicationEnvironment(input: [AddApplicationEnvironmentInput!]!, upsert: Boolean): AddApplicationEnvironmentPayload + updateApplicationEnvironment(input: UpdateApplicationEnvironmentInput!): UpdateApplicationEnvironmentPayload + deleteApplicationEnvironment(filter: ApplicationEnvironmentFilter!): DeleteApplicationEnvironmentPayload + addApplicationRiskStatus(input: [AddApplicationRiskStatusInput!]!): AddApplicationRiskStatusPayload + updateApplicationRiskStatus(input: UpdateApplicationRiskStatusInput!): UpdateApplicationRiskStatusPayload + deleteApplicationRiskStatus(filter: ApplicationRiskStatusFilter!): DeleteApplicationRiskStatusPayload + addApplicationDeployment(input: [AddApplicationDeploymentInput!]!, upsert: Boolean): AddApplicationDeploymentPayload + updateApplicationDeployment(input: UpdateApplicationDeploymentInput!): UpdateApplicationDeploymentPayload + deleteApplicationDeployment(filter: ApplicationDeploymentFilter!): DeleteApplicationDeploymentPayload + addToolsUsed(input: [AddToolsUsedInput!]!): AddToolsUsedPayload + updateToolsUsed(input: UpdateToolsUsedInput!): UpdateToolsUsedPayload + deleteToolsUsed(filter: ToolsUsedFilter!): DeleteToolsUsedPayload + addApplicationDeploymentRisk(input: 
[AddApplicationDeploymentRiskInput!]!): AddApplicationDeploymentRiskPayload + updateApplicationDeploymentRisk(input: UpdateApplicationDeploymentRiskInput!): UpdateApplicationDeploymentRiskPayload + deleteApplicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter!): DeleteApplicationDeploymentRiskPayload + addIntegrator(input: [AddIntegratorInput!]!, upsert: Boolean): AddIntegratorPayload + updateIntegrator(input: UpdateIntegratorInput!): UpdateIntegratorPayload + deleteIntegrator(filter: IntegratorFilter!): DeleteIntegratorPayload + addCredentials(input: [AddCredentialsInput!]!): AddCredentialsPayload + updateCredentials(input: UpdateCredentialsInput!): UpdateCredentialsPayload + deleteCredentials(filter: CredentialsFilter!): DeleteCredentialsPayload + addFeatureMode(input: [AddFeatureModeInput!]!, upsert: Boolean): AddFeatureModePayload + updateFeatureMode(input: UpdateFeatureModeInput!): UpdateFeatureModePayload + deleteFeatureMode(filter: FeatureModeFilter!): DeleteFeatureModePayload + addTag(input: [AddTagInput!]!, upsert: Boolean): AddTagPayload + updateTag(input: UpdateTagInput!): UpdateTagPayload + deleteTag(filter: TagFilter!): DeleteTagPayload + addPolicyDefinition(input: [AddPolicyDefinitionInput!]!, upsert: Boolean): AddPolicyDefinitionPayload + updatePolicyDefinition(input: UpdatePolicyDefinitionInput!): UpdatePolicyDefinitionPayload + deletePolicyDefinition(filter: PolicyDefinitionFilter!): DeletePolicyDefinitionPayload + addPolicyEnforcement(input: [AddPolicyEnforcementInput!]!): AddPolicyEnforcementPayload + updatePolicyEnforcement(input: UpdatePolicyEnforcementInput!): UpdatePolicyEnforcementPayload + deletePolicyEnforcement(filter: PolicyEnforcementFilter!): DeletePolicyEnforcementPayload + addRunHistory(input: [AddRunHistoryInput!]!): AddRunHistoryPayload + updateRunHistory(input: UpdateRunHistoryInput!): UpdateRunHistoryPayload + deleteRunHistory(filter: RunHistoryFilter!): DeleteRunHistoryPayload + addSecurityIssue(input: 
[AddSecurityIssueInput!]!): AddSecurityIssuePayload + updateSecurityIssue(input: UpdateSecurityIssueInput!): UpdateSecurityIssuePayload + deleteSecurityIssue(filter: SecurityIssueFilter!): DeleteSecurityIssuePayload + addBuildTool(input: [AddBuildToolInput!]!, upsert: Boolean): AddBuildToolPayload + updateBuildTool(input: UpdateBuildToolInput!): UpdateBuildToolPayload + deleteBuildTool(filter: BuildToolFilter!): DeleteBuildToolPayload + addSourceCodeTool(input: [AddSourceCodeToolInput!]!, upsert: Boolean): AddSourceCodeToolPayload + updateSourceCodeTool(input: UpdateSourceCodeToolInput!): UpdateSourceCodeToolPayload + deleteSourceCodeTool(filter: SourceCodeToolFilter!): DeleteSourceCodeToolPayload + addCommitMetaData(input: [AddCommitMetaDataInput!]!): AddCommitMetaDataPayload + updateCommitMetaData(input: UpdateCommitMetaDataInput!): UpdateCommitMetaDataPayload + deleteCommitMetaData(filter: CommitMetaDataFilter!): DeleteCommitMetaDataPayload + addArtifact(input: [AddArtifactInput!]!, upsert: Boolean): AddArtifactPayload + updateArtifact(input: UpdateArtifactInput!): UpdateArtifactPayload + deleteArtifact(filter: ArtifactFilter!): DeleteArtifactPayload + addArtifactScanData(input: [AddArtifactScanDataInput!]!, upsert: Boolean): AddArtifactScanDataPayload + updateArtifactScanData(input: UpdateArtifactScanDataInput!): UpdateArtifactScanDataPayload + deleteArtifactScanData(filter: ArtifactScanDataFilter!): DeleteArtifactScanDataPayload + addComponent(input: [AddComponentInput!]!, upsert: Boolean): AddComponentPayload + updateComponent(input: UpdateComponentInput!): UpdateComponentPayload + deleteComponent(filter: ComponentFilter!): DeleteComponentPayload + addVulnerability(input: [AddVulnerabilityInput!]!, upsert: Boolean): AddVulnerabilityPayload + updateVulnerability(input: UpdateVulnerabilityInput!): UpdateVulnerabilityPayload + deleteVulnerability(filter: VulnerabilityFilter!): DeleteVulnerabilityPayload + addCWE(input: [AddCWEInput!]!, upsert: Boolean): 
AddCWEPayload + updateCWE(input: UpdateCWEInput!): UpdateCWEPayload + deleteCWE(filter: CWEFilter!): DeleteCWEPayload +} + +input NearFilter { + distance: Float! + coordinate: PointRef! +} + +type Organization implements RBAC { + """id is randomly assigned""" + id: String! + name: String! + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + teams(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team!] + environments(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget!] + policies(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition!] + policyEnforcements(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] + integrators(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator!] + featureModes(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode!] 
+ rolesAggregate(filter: RoleFilter): RoleAggregateResult + teamsAggregate(filter: TeamFilter): TeamAggregateResult + environmentsAggregate(filter: DeploymentTargetFilter): DeploymentTargetAggregateResult + policiesAggregate(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + policyEnforcementsAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult + integratorsAggregate(filter: IntegratorFilter): IntegratorAggregateResult + featureModesAggregate(filter: FeatureModeFilter): FeatureModeAggregateResult +} + +type OrganizationAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String +} + +input OrganizationFilter { + id: StringHashFilter + name: StringExactFilter + has: [OrganizationHasFilter] + and: [OrganizationFilter] + or: [OrganizationFilter] + not: OrganizationFilter +} + +enum OrganizationHasFilter { + id + name + roles + teams + environments + policies + policyEnforcements + integrators + featureModes +} + +input OrganizationOrder { + asc: OrganizationOrderable + desc: OrganizationOrderable + then: OrganizationOrder +} + +enum OrganizationOrderable { + id + name +} + +input OrganizationPatch { + name: String + roles: [RoleRef!] + teams: [TeamRef!] + environments: [DeploymentTargetRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + integrators: [IntegratorRef!] + featureModes: [FeatureModeRef!] +} + +input OrganizationRef { + """id is randomly assigned""" + id: String + name: String + roles: [RoleRef!] + teams: [TeamRef!] + environments: [DeploymentTargetRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + integrators: [IntegratorRef!] + featureModes: [FeatureModeRef!] +} + +type Point { + longitude: Float! + latitude: Float! +} + +input PointGeoFilter { + near: NearFilter + within: WithinFilter +} + +type PointList { + points: [Point!]! +} + +input PointListRef { + points: [PointRef!]! 
+} + +input PointRef { + longitude: Float! + latitude: Float! +} + +type PolicyDefinition { + id: String! + ownerOrg(filter: OrganizationFilter): Organization! + ownerTeam(filter: TeamFilter): Team + ownerApplication(filter: ApplicationFilter): Application + createdAt: DateTime! + updatedAt: DateTime! + policyName: String! + category: String! + stage: String! + description: String! + scheduledPolicy: Boolean! + script: String! + variables: String + conditionName: String + suggestion: String +} + +type PolicyDefinitionAggregateResult { + count: Int + idMin: String + idMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime + policyNameMin: String + policyNameMax: String + categoryMin: String + categoryMax: String + stageMin: String + stageMax: String + descriptionMin: String + descriptionMax: String + scriptMin: String + scriptMax: String + variablesMin: String + variablesMax: String + conditionNameMin: String + conditionNameMax: String + suggestionMin: String + suggestionMax: String +} + +input PolicyDefinitionFilter { + id: StringHashFilter + policyName: StringExactFilter + category: StringExactFilter + stage: StringExactFilter + description: StringExactFilter + scheduledPolicy: Boolean + script: StringExactFilter + variables: StringExactFilter + conditionName: StringExactFilter + suggestion: StringExactFilter + has: [PolicyDefinitionHasFilter] + and: [PolicyDefinitionFilter] + or: [PolicyDefinitionFilter] + not: PolicyDefinitionFilter +} + +enum PolicyDefinitionHasFilter { + id + ownerOrg + ownerTeam + ownerApplication + createdAt + updatedAt + policyName + category + stage + description + scheduledPolicy + script + variables + conditionName + suggestion +} + +input PolicyDefinitionOrder { + asc: PolicyDefinitionOrderable + desc: PolicyDefinitionOrderable + then: PolicyDefinitionOrder +} + +enum PolicyDefinitionOrderable { + id + createdAt + updatedAt + policyName + category + stage + description + script + 
variables + conditionName + suggestion +} + +input PolicyDefinitionPatch { + ownerOrg: OrganizationRef + ownerTeam: TeamRef + ownerApplication: ApplicationRef + createdAt: DateTime + updatedAt: DateTime + policyName: String + category: String + stage: String + description: String + scheduledPolicy: Boolean + script: String + variables: String + conditionName: String + suggestion: String +} + +input PolicyDefinitionRef { + id: String + ownerOrg: OrganizationRef + ownerTeam: TeamRef + ownerApplication: ApplicationRef + createdAt: DateTime + updatedAt: DateTime + policyName: String + category: String + stage: String + description: String + scheduledPolicy: Boolean + script: String + variables: String + conditionName: String + suggestion: String +} + +type PolicyEnforcement { + id: ID! + policy(filter: PolicyDefinitionFilter): PolicyDefinition! + enforcedOrg(filter: OrganizationFilter): Organization + enforcedTeam(filter: TeamFilter): Team + enforcedApplication(filter: ApplicationFilter): Application + status: Boolean! + forceApply: Boolean + severity: Severity! + datasourceTool: String! + action: String! + conditionValue: String + environments(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment!] + tags(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag!] + createdAt: DateTime! + updatedAt: DateTime! + environmentsAggregate(filter: EnvironmentFilter): EnvironmentAggregateResult + tagsAggregate(filter: TagFilter): TagAggregateResult +} + +type PolicyEnforcementAggregateResult { + count: Int + datasourceToolMin: String + datasourceToolMax: String + actionMin: String + actionMax: String + conditionValueMin: String + conditionValueMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input PolicyEnforcementFilter { + id: [ID!] 
+ status: Boolean + forceApply: Boolean + datasourceTool: StringExactFilter + action: StringExactFilter + conditionValue: StringExactFilter + has: [PolicyEnforcementHasFilter] + and: [PolicyEnforcementFilter] + or: [PolicyEnforcementFilter] + not: PolicyEnforcementFilter +} + +enum PolicyEnforcementHasFilter { + policy + enforcedOrg + enforcedTeam + enforcedApplication + status + forceApply + severity + datasourceTool + action + conditionValue + environments + tags + createdAt + updatedAt +} + +input PolicyEnforcementOrder { + asc: PolicyEnforcementOrderable + desc: PolicyEnforcementOrderable + then: PolicyEnforcementOrder +} + +enum PolicyEnforcementOrderable { + datasourceTool + action + conditionValue + createdAt + updatedAt +} + +input PolicyEnforcementPatch { + policy: PolicyDefinitionRef + enforcedOrg: OrganizationRef + enforcedTeam: TeamRef + enforcedApplication: ApplicationRef + status: Boolean + forceApply: Boolean + severity: Severity + datasourceTool: String + action: String + conditionValue: String + environments: [EnvironmentRef!] + tags: [TagRef!] + createdAt: DateTime + updatedAt: DateTime +} + +input PolicyEnforcementRef { + id: ID + policy: PolicyDefinitionRef + enforcedOrg: OrganizationRef + enforcedTeam: TeamRef + enforcedApplication: ApplicationRef + status: Boolean + forceApply: Boolean + severity: Severity + datasourceTool: String + action: String + conditionValue: String + environments: [EnvironmentRef!] + tags: [TagRef!] + createdAt: DateTime + updatedAt: DateTime +} + +type Polygon { + coordinates: [PointList!]! +} + +input PolygonGeoFilter { + near: NearFilter + within: WithinFilter + contains: ContainsFilter + intersects: IntersectsFilter +} + +input PolygonRef { + coordinates: [PointListRef!]! 
+} + +type Query { + querySchemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + aggregateSchemaVersion(filter: SchemaVersionFilter): SchemaVersionAggregateResult + queryRBAC(filter: RBACFilter, first: Int, offset: Int): [RBAC] + aggregateRBAC(filter: RBACFilter): RBACAggregateResult + getRole(id: String!): Role + queryRole(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + aggregateRole(filter: RoleFilter): RoleAggregateResult + getKeyValue(id: String!): KeyValue + queryKeyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + aggregateKeyValue(filter: KeyValueFilter): KeyValueAggregateResult + getOrganization(id: String!): Organization + queryOrganization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + aggregateOrganization(filter: OrganizationFilter): OrganizationAggregateResult + getEnvironment(id: String!): Environment + queryEnvironment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + aggregateEnvironment(filter: EnvironmentFilter): EnvironmentAggregateResult + getDeploymentTarget(id: String!): DeploymentTarget + queryDeploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + aggregateDeploymentTarget(filter: DeploymentTargetFilter): DeploymentTargetAggregateResult + getTeam(id: String!): Team + queryTeam(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + aggregateTeam(filter: TeamFilter): TeamAggregateResult + getApplication(id: String!): Application + queryApplication(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + aggregateApplication(filter: ApplicationFilter): ApplicationAggregateResult + getApplicationEnvironment(id: String!): ApplicationEnvironment + queryApplicationEnvironment(filter: 
ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + aggregateApplicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironmentAggregateResult + getApplicationRiskStatus(id: ID!): ApplicationRiskStatus + queryApplicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + aggregateApplicationRiskStatus(filter: ApplicationRiskStatusFilter): ApplicationRiskStatusAggregateResult + getApplicationDeployment(id: String!): ApplicationDeployment + queryApplicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + aggregateApplicationDeployment(filter: ApplicationDeploymentFilter): ApplicationDeploymentAggregateResult + getToolsUsed(id: ID!): ToolsUsed + queryToolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + aggregateToolsUsed(filter: ToolsUsedFilter): ToolsUsedAggregateResult + getApplicationDeploymentRisk(id: ID!): ApplicationDeploymentRisk + queryApplicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + aggregateApplicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter): ApplicationDeploymentRiskAggregateResult + getIntegrator(id: String!): Integrator + queryIntegrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + aggregateIntegrator(filter: IntegratorFilter): IntegratorAggregateResult + getCredentials(id: ID!): Credentials + queryCredentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + aggregateCredentials(filter: CredentialsFilter): CredentialsAggregateResult + getFeatureMode(id: String!): FeatureMode + queryFeatureMode(filter: FeatureModeFilter, order: FeatureModeOrder, 
first: Int, offset: Int): [FeatureMode] + aggregateFeatureMode(filter: FeatureModeFilter): FeatureModeAggregateResult + getTag(id: String!): Tag + queryTag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + aggregateTag(filter: TagFilter): TagAggregateResult + getPolicyDefinition(id: String!): PolicyDefinition + queryPolicyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + aggregatePolicyDefinition(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + getPolicyEnforcement(id: ID!): PolicyEnforcement + queryPolicyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + aggregatePolicyEnforcement(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult + getRunHistory(id: ID!): RunHistory + queryRunHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + aggregateRunHistory(filter: RunHistoryFilter): RunHistoryAggregateResult + getSecurityIssue(id: ID!): SecurityIssue + querySecurityIssue(filter: SecurityIssueFilter, order: SecurityIssueOrder, first: Int, offset: Int): [SecurityIssue] + aggregateSecurityIssue(filter: SecurityIssueFilter): SecurityIssueAggregateResult + getBuildTool(id: String!): BuildTool + queryBuildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + aggregateBuildTool(filter: BuildToolFilter): BuildToolAggregateResult + getSourceCodeTool(id: String!): SourceCodeTool + querySourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + aggregateSourceCodeTool(filter: SourceCodeToolFilter): SourceCodeToolAggregateResult + getCommitMetaData(id: ID!): CommitMetaData + queryCommitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + aggregateCommitMetaData(filter: CommitMetaDataFilter): 
CommitMetaDataAggregateResult + getArtifact(id: String!): Artifact + queryArtifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + aggregateArtifact(filter: ArtifactFilter): ArtifactAggregateResult + getArtifactScanData(id: String!): ArtifactScanData + queryArtifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + aggregateArtifactScanData(filter: ArtifactScanDataFilter): ArtifactScanDataAggregateResult + getComponent(id: String!): Component + queryComponent(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + aggregateComponent(filter: ComponentFilter): ComponentAggregateResult + getVulnerability(id: String!): Vulnerability + queryVulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + aggregateVulnerability(filter: VulnerabilityFilter): VulnerabilityAggregateResult + getCWE(id: String!): CWE + queryCWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + aggregateCWE(filter: CWEFilter): CWEAggregateResult +} + +interface RBAC { + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + rolesAggregate(filter: RoleFilter): RoleAggregateResult +} + +type RBACAggregateResult { + count: Int +} + +input RBACFilter { + has: [RBACHasFilter] + and: [RBACFilter] + or: [RBACFilter] + not: RBACFilter +} + +enum RBACHasFilter { + roles +} + +input RBACPatch { + roles: [RoleRef!] +} + +""" +RiskStatus tells us what risk a current application instance or a deployment is at. 
+""" +enum RiskStatus { + lowrisk + mediumrisk + highrisk + apocalypserisk + scanning +} + +input RiskStatus_exact { + eq: RiskStatus + in: [RiskStatus] + le: RiskStatus + lt: RiskStatus + ge: RiskStatus + gt: RiskStatus + between: RiskStatus +} + +input RiskStatus_exact_StringRegExpFilter { + eq: RiskStatus + in: [RiskStatus] + le: RiskStatus + lt: RiskStatus + ge: RiskStatus + gt: RiskStatus + between: RiskStatus + regexp: String +} + +type Role { + """id is randomly assigned""" + id: String! + + """group should be a URI format that includes a scope or realm""" + group: String! + permission: RolePermission! +} + +type RoleAggregateResult { + count: Int + idMin: String + idMax: String + groupMin: String + groupMax: String +} + +input RoleFilter { + id: StringHashFilter + group: StringHashFilter + permission: RolePermission_hash + has: [RoleHasFilter] + and: [RoleFilter] + or: [RoleFilter] + not: RoleFilter +} + +enum RoleHasFilter { + id + group + permission +} + +input RoleOrder { + asc: RoleOrderable + desc: RoleOrderable + then: RoleOrder +} + +enum RoleOrderable { + id + group +} + +input RolePatch { + """group should be a URI format that includes a scope or realm""" + group: String + permission: RolePermission +} + +enum RolePermission { + admin + write + read +} + +input RolePermission_hash { + eq: RolePermission + in: [RolePermission] +} + +input RoleRef { + """id is randomly assigned""" + id: String + + """group should be a URI format that includes a scope or realm""" + group: String + permission: RolePermission +} + +type RunHistory { + id: ID! + policyId: String! + applicationDeployment(filter: ApplicationDeploymentFilter): ApplicationDeployment + artifactScan(filter: ArtifactScanDataFilter): ArtifactScanData + PolicyName: String! + Stage: String! + Artifact: String! + ArtifactTag: String! + ArtifactSha: String! + ArtifactNameTag: String! + DatasourceTool: String! + CreatedAt: DateTime! + UpdatedAt: DateTime! + DeployedAt: DateTime! 
+ Hash: String + Pass: Boolean! + MetaData: String + FileApi: String + scheduledPolicy: Boolean! + policyEnforcements(filter: PolicyEnforcementFilter): PolicyEnforcement! + securityIssue(filter: SecurityIssueFilter): SecurityIssue +} + +type RunHistoryAggregateResult { + count: Int + policyIdMin: String + policyIdMax: String + PolicyNameMin: String + PolicyNameMax: String + StageMin: String + StageMax: String + ArtifactMin: String + ArtifactMax: String + ArtifactTagMin: String + ArtifactTagMax: String + ArtifactShaMin: String + ArtifactShaMax: String + ArtifactNameTagMin: String + ArtifactNameTagMax: String + DatasourceToolMin: String + DatasourceToolMax: String + CreatedAtMin: DateTime + CreatedAtMax: DateTime + UpdatedAtMin: DateTime + UpdatedAtMax: DateTime + DeployedAtMin: DateTime + DeployedAtMax: DateTime + HashMin: String + HashMax: String + MetaDataMin: String + MetaDataMax: String + FileApiMin: String + FileApiMax: String +} + +input RunHistoryFilter { + id: [ID!] + policyId: StringExactFilter + PolicyName: StringExactFilter + Stage: StringExactFilter + Artifact: StringExactFilter + ArtifactTag: StringExactFilter + ArtifactSha: StringExactFilter + ArtifactNameTag: StringExactFilter_StringRegExpFilter + CreatedAt: DateTimeFilter + UpdatedAt: DateTimeFilter + DeployedAt: DateTimeFilter + Pass: Boolean + scheduledPolicy: Boolean + has: [RunHistoryHasFilter] + and: [RunHistoryFilter] + or: [RunHistoryFilter] + not: RunHistoryFilter +} + +enum RunHistoryHasFilter { + policyId + applicationDeployment + artifactScan + PolicyName + Stage + Artifact + ArtifactTag + ArtifactSha + ArtifactNameTag + DatasourceTool + CreatedAt + UpdatedAt + DeployedAt + Hash + Pass + MetaData + FileApi + scheduledPolicy + policyEnforcements + securityIssue +} + +input RunHistoryOrder { + asc: RunHistoryOrderable + desc: RunHistoryOrderable + then: RunHistoryOrder +} + +enum RunHistoryOrderable { + policyId + PolicyName + Stage + Artifact + ArtifactTag + ArtifactSha + ArtifactNameTag + 
DatasourceTool + CreatedAt + UpdatedAt + DeployedAt + Hash + MetaData + FileApi +} + +input RunHistoryPatch { + policyId: String + applicationDeployment: ApplicationDeploymentRef + artifactScan: ArtifactScanDataRef + PolicyName: String + Stage: String + Artifact: String + ArtifactTag: String + ArtifactSha: String + ArtifactNameTag: String + DatasourceTool: String + CreatedAt: DateTime + UpdatedAt: DateTime + DeployedAt: DateTime + Hash: String + Pass: Boolean + MetaData: String + FileApi: String + scheduledPolicy: Boolean + policyEnforcements: PolicyEnforcementRef + securityIssue: SecurityIssueRef +} + +input RunHistoryRef { + id: ID + policyId: String + applicationDeployment: ApplicationDeploymentRef + artifactScan: ArtifactScanDataRef + PolicyName: String + Stage: String + Artifact: String + ArtifactTag: String + ArtifactSha: String + ArtifactNameTag: String + DatasourceTool: String + CreatedAt: DateTime + UpdatedAt: DateTime + DeployedAt: DateTime + Hash: String + Pass: Boolean + MetaData: String + FileApi: String + scheduledPolicy: Boolean + policyEnforcements: PolicyEnforcementRef + securityIssue: SecurityIssueRef +} + +type SchemaVersion { + version: String! +} + +type SchemaVersionAggregateResult { + count: Int + versionMin: String + versionMax: String +} + +input SchemaVersionFilter { + has: [SchemaVersionHasFilter] + and: [SchemaVersionFilter] + or: [SchemaVersionFilter] + not: SchemaVersionFilter +} + +enum SchemaVersionHasFilter { + version +} + +input SchemaVersionOrder { + asc: SchemaVersionOrderable + desc: SchemaVersionOrderable + then: SchemaVersionOrder +} + +enum SchemaVersionOrderable { + version +} + +input SchemaVersionPatch { + version: String +} + +input SchemaVersionRef { + version: String +} + +type SecurityIssue { + id: ID! + AlertTitle: String + AlertMessage: String + Suggestions: String + Severity: Severity! + CreatedAt: DateTime! + UpdatedAt: DateTime! + Action: String! + JiraUrl: String + Status: String! 
+ Reason: String + Error: String + Affects(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory!] + AffectsAggregate(filter: RunHistoryFilter): RunHistoryAggregateResult +} + +type SecurityIssueAggregateResult { + count: Int + AlertTitleMin: String + AlertTitleMax: String + AlertMessageMin: String + AlertMessageMax: String + SuggestionsMin: String + SuggestionsMax: String + CreatedAtMin: DateTime + CreatedAtMax: DateTime + UpdatedAtMin: DateTime + UpdatedAtMax: DateTime + ActionMin: String + ActionMax: String + JiraUrlMin: String + JiraUrlMax: String + StatusMin: String + StatusMax: String + ReasonMin: String + ReasonMax: String + ErrorMin: String + ErrorMax: String +} + +input SecurityIssueFilter { + id: [ID!] + AlertTitle: StringExactFilter_StringRegExpFilter + AlertMessage: StringExactFilter + Suggestions: StringExactFilter + Severity: Severity_exact + CreatedAt: DateTimeFilter + UpdatedAt: DateTimeFilter + Action: StringExactFilter + Status: StringExactFilter + Reason: StringExactFilter + Error: StringExactFilter + has: [SecurityIssueHasFilter] + and: [SecurityIssueFilter] + or: [SecurityIssueFilter] + not: SecurityIssueFilter +} + +enum SecurityIssueHasFilter { + AlertTitle + AlertMessage + Suggestions + Severity + CreatedAt + UpdatedAt + Action + JiraUrl + Status + Reason + Error + Affects +} + +input SecurityIssueOrder { + asc: SecurityIssueOrderable + desc: SecurityIssueOrderable + then: SecurityIssueOrder +} + +enum SecurityIssueOrderable { + AlertTitle + AlertMessage + Suggestions + CreatedAt + UpdatedAt + Action + JiraUrl + Status + Reason + Error +} + +input SecurityIssuePatch { + AlertTitle: String + AlertMessage: String + Suggestions: String + Severity: Severity + CreatedAt: DateTime + UpdatedAt: DateTime + Action: String + JiraUrl: String + Status: String + Reason: String + Error: String + Affects: [RunHistoryRef!] 
+} + +input SecurityIssueRef { + id: ID + AlertTitle: String + AlertMessage: String + Suggestions: String + Severity: Severity + CreatedAt: DateTime + UpdatedAt: DateTime + Action: String + JiraUrl: String + Status: String + Reason: String + Error: String + Affects: [RunHistoryRef!] +} + +enum Severity { + critical + high + medium + low + info + none + unknown +} + +input Severity_exact { + eq: Severity + in: [Severity] + le: Severity + lt: Severity + ge: Severity + gt: Severity + between: Severity +} + +""" +SourceCodeTool contains the source details about the artifact that was built. +""" +type SourceCodeTool { + """id is randomly assigned""" + id: String! + createdAt: DateTime! + + """scm is the scm tool github/gitlab etc""" + scm: String! + + """repository is the git remote repository""" + repository: String! + + """branch is the git branch on which the artifact was built""" + branch: String! + + """headCommit is the checkout out head commit""" + headCommit: String + + """ + diffCommits is a comma separated string of the commits between the previous built artifact and the current + """ + diffCommits: String + licenseName: String + visibility: String + workflowName: String + + """parentRepo is populated in case the git repo is a fork""" + parentRepo: String + buildTool(filter: BuildToolFilter): BuildTool! 
+} + +type SourceCodeToolAggregateResult { + count: Int + idMin: String + idMax: String + createdAtMin: DateTime + createdAtMax: DateTime + scmMin: String + scmMax: String + repositoryMin: String + repositoryMax: String + branchMin: String + branchMax: String + headCommitMin: String + headCommitMax: String + diffCommitsMin: String + diffCommitsMax: String + licenseNameMin: String + licenseNameMax: String + visibilityMin: String + visibilityMax: String + workflowNameMin: String + workflowNameMax: String + parentRepoMin: String + parentRepoMax: String +} + +input SourceCodeToolFilter { + id: StringHashFilter + repository: StringExactFilter_StringRegExpFilter + has: [SourceCodeToolHasFilter] + and: [SourceCodeToolFilter] + or: [SourceCodeToolFilter] + not: SourceCodeToolFilter +} + +enum SourceCodeToolHasFilter { + id + createdAt + scm + repository + branch + headCommit + diffCommits + licenseName + visibility + workflowName + parentRepo + buildTool +} + +input SourceCodeToolOrder { + asc: SourceCodeToolOrderable + desc: SourceCodeToolOrderable + then: SourceCodeToolOrder +} + +enum SourceCodeToolOrderable { + id + createdAt + scm + repository + branch + headCommit + diffCommits + licenseName + visibility + workflowName + parentRepo +} + +input SourceCodeToolPatch { + createdAt: DateTime + + """scm is the scm tool github/gitlab etc""" + scm: String + + """repository is the git remote repository""" + repository: String + + """branch is the git branch on which the artifact was built""" + branch: String + + """headCommit is the checkout out head commit""" + headCommit: String + + """ + diffCommits is a comma separated string of the commits between the previous built artifact and the current + """ + diffCommits: String + licenseName: String + visibility: String + workflowName: String + + """parentRepo is populated in case the git repo is a fork""" + parentRepo: String + buildTool: BuildToolRef +} + +input SourceCodeToolRef { + """id is randomly assigned""" + id: String + 
createdAt: DateTime + + """scm is the scm tool github/gitlab etc""" + scm: String + + """repository is the git remote repository""" + repository: String + + """branch is the git branch on which the artifact was built""" + branch: String + + """headCommit is the checkout out head commit""" + headCommit: String + + """ + diffCommits is a comma separated string of the commits between the previous built artifact and the current + """ + diffCommits: String + licenseName: String + visibility: String + workflowName: String + + """parentRepo is populated in case the git repo is a fork""" + parentRepo: String + buildTool: BuildToolRef +} + +input StringExactFilter { + eq: String + in: [String] + le: String + lt: String + ge: String + gt: String + between: StringRange +} + +input StringExactFilter_StringRegExpFilter { + eq: String + in: [String] + le: String + lt: String + ge: String + gt: String + between: StringRange + regexp: String +} + +input StringFullTextFilter { + alloftext: String + anyoftext: String +} + +input StringHashFilter { + eq: String + in: [String] +} + +input StringRange { + min: String! + max: String! 
+} + +input StringRegExpFilter { + regexp: String +} + +input StringTermFilter { + allofterms: String + anyofterms: String +} + +type Subscription { + getOrganization(id: String!): Organization + queryOrganization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + aggregateOrganization(filter: OrganizationFilter): OrganizationAggregateResult + getTeam(id: String!): Team + queryTeam(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + aggregateTeam(filter: TeamFilter): TeamAggregateResult + getApplication(id: String!): Application + queryApplication(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + aggregateApplication(filter: ApplicationFilter): ApplicationAggregateResult + getApplicationEnvironment(id: String!): ApplicationEnvironment + queryApplicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + aggregateApplicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironmentAggregateResult +} + +""" +Tag tells us about the tags that are linked to policies and other components. +""" +type Tag { + id: String! + tagName: String! + tagValue: String! + tagDescription: String + createdBy: String + createdAt: DateTime! + updatedAt: DateTime! + policies(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] 
+ policiesAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult +} + +type TagAggregateResult { + count: Int + idMin: String + idMax: String + tagNameMin: String + tagNameMax: String + tagValueMin: String + tagValueMax: String + tagDescriptionMin: String + tagDescriptionMax: String + createdByMin: String + createdByMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input TagFilter { + id: StringExactFilter + tagName: StringExactFilter + tagValue: StringExactFilter + createdBy: StringExactFilter + has: [TagHasFilter] + and: [TagFilter] + or: [TagFilter] + not: TagFilter +} + +enum TagHasFilter { + id + tagName + tagValue + tagDescription + createdBy + createdAt + updatedAt + policies +} + +input TagOrder { + asc: TagOrderable + desc: TagOrderable + then: TagOrder +} + +enum TagOrderable { + id + tagName + tagValue + tagDescription + createdBy + createdAt + updatedAt +} + +input TagPatch { + tagName: String + tagValue: String + tagDescription: String + createdBy: String + createdAt: DateTime + updatedAt: DateTime + policies: [PolicyEnforcementRef!] +} + +input TagRef { + id: String + tagName: String + tagValue: String + tagDescription: String + createdBy: String + createdAt: DateTime + updatedAt: DateTime + policies: [PolicyEnforcementRef!] +} + +type Team implements RBAC { + """id is randomly assigned""" + id: String! + name: String! + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + organization(filter: OrganizationFilter): Organization! + applications(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application!] + labels(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue!] + policies(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition!] 
+ policyEnforcements(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] + rolesAggregate(filter: RoleFilter): RoleAggregateResult + applicationsAggregate(filter: ApplicationFilter): ApplicationAggregateResult + labelsAggregate(filter: KeyValueFilter): KeyValueAggregateResult + policiesAggregate(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + policyEnforcementsAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult +} + +type TeamAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String +} + +input TeamFilter { + id: StringHashFilter + name: StringExactFilter + has: [TeamHasFilter] + and: [TeamFilter] + or: [TeamFilter] + not: TeamFilter +} + +enum TeamHasFilter { + id + name + roles + organization + applications + labels + policies + policyEnforcements +} + +input TeamOrder { + asc: TeamOrderable + desc: TeamOrderable + then: TeamOrder +} + +enum TeamOrderable { + id + name +} + +input TeamPatch { + name: String + roles: [RoleRef!] + organization: OrganizationRef + applications: [ApplicationRef!] + labels: [KeyValueRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] +} + +input TeamRef { + """id is randomly assigned""" + id: String + name: String + roles: [RoleRef!] + organization: OrganizationRef + applications: [ApplicationRef!] + labels: [KeyValueRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] +} + +type ToolsUsed { + id: ID! + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +type ToolsUsedAggregateResult { + count: Int + sourceMin: String + sourceMax: String + buildMin: String + buildMax: String + artifactMin: String + artifactMax: String + deployMin: String + deployMax: String + sbomMin: String + sbomMax: String +} + +input ToolsUsedFilter { + id: [ID!] 
+ has: [ToolsUsedHasFilter] + and: [ToolsUsedFilter] + or: [ToolsUsedFilter] + not: ToolsUsedFilter +} + +enum ToolsUsedHasFilter { + source + build + artifact + deploy + sbom + misc +} + +input ToolsUsedOrder { + asc: ToolsUsedOrderable + desc: ToolsUsedOrderable + then: ToolsUsedOrder +} + +enum ToolsUsedOrderable { + source + build + artifact + deploy + sbom +} + +input ToolsUsedPatch { + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +input ToolsUsedRef { + id: ID + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +input UpdateApplicationDeploymentInput { + filter: ApplicationDeploymentFilter! + set: ApplicationDeploymentPatch + remove: ApplicationDeploymentPatch +} + +type UpdateApplicationDeploymentPayload { + applicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + numUids: Int +} + +input UpdateApplicationDeploymentRiskInput { + filter: ApplicationDeploymentRiskFilter! + set: ApplicationDeploymentRiskPatch + remove: ApplicationDeploymentRiskPatch +} + +type UpdateApplicationDeploymentRiskPayload { + applicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + numUids: Int +} + +input UpdateApplicationEnvironmentInput { + filter: ApplicationEnvironmentFilter! + set: ApplicationEnvironmentPatch + remove: ApplicationEnvironmentPatch +} + +type UpdateApplicationEnvironmentPayload { + applicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + numUids: Int +} + +input UpdateApplicationInput { + filter: ApplicationFilter! 
+ set: ApplicationPatch + remove: ApplicationPatch +} + +type UpdateApplicationPayload { + application(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + numUids: Int +} + +input UpdateApplicationRiskStatusInput { + filter: ApplicationRiskStatusFilter! + set: ApplicationRiskStatusPatch + remove: ApplicationRiskStatusPatch +} + +type UpdateApplicationRiskStatusPayload { + applicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + numUids: Int +} + +input UpdateArtifactInput { + filter: ArtifactFilter! + set: ArtifactPatch + remove: ArtifactPatch +} + +type UpdateArtifactPayload { + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + numUids: Int +} + +input UpdateArtifactScanDataInput { + filter: ArtifactScanDataFilter! + set: ArtifactScanDataPatch + remove: ArtifactScanDataPatch +} + +type UpdateArtifactScanDataPayload { + artifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + numUids: Int +} + +input UpdateBuildToolInput { + filter: BuildToolFilter! + set: BuildToolPatch + remove: BuildToolPatch +} + +type UpdateBuildToolPayload { + buildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + numUids: Int +} + +input UpdateCommitMetaDataInput { + filter: CommitMetaDataFilter! + set: CommitMetaDataPatch + remove: CommitMetaDataPatch +} + +type UpdateCommitMetaDataPayload { + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + numUids: Int +} + +input UpdateComponentInput { + filter: ComponentFilter! 
+ set: ComponentPatch + remove: ComponentPatch +} + +type UpdateComponentPayload { + component(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + numUids: Int +} + +input UpdateCredentialsInput { + filter: CredentialsFilter! + set: CredentialsPatch + remove: CredentialsPatch +} + +type UpdateCredentialsPayload { + credentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + numUids: Int +} + +input UpdateCWEInput { + filter: CWEFilter! + set: CWEPatch + remove: CWEPatch +} + +type UpdateCWEPayload { + cWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + numUids: Int +} + +input UpdateDeploymentTargetInput { + filter: DeploymentTargetFilter! + set: DeploymentTargetPatch + remove: DeploymentTargetPatch +} + +type UpdateDeploymentTargetPayload { + deploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + numUids: Int +} + +input UpdateEnvironmentInput { + filter: EnvironmentFilter! + set: EnvironmentPatch + remove: EnvironmentPatch +} + +type UpdateEnvironmentPayload { + environment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + numUids: Int +} + +input UpdateFeatureModeInput { + filter: FeatureModeFilter! + set: FeatureModePatch + remove: FeatureModePatch +} + +type UpdateFeatureModePayload { + featureMode(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode] + numUids: Int +} + +input UpdateIntegratorInput { + filter: IntegratorFilter! + set: IntegratorPatch + remove: IntegratorPatch +} + +type UpdateIntegratorPayload { + integrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + numUids: Int +} + +input UpdateKeyValueInput { + filter: KeyValueFilter! 
+ set: KeyValuePatch + remove: KeyValuePatch +} + +type UpdateKeyValuePayload { + keyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + numUids: Int +} + +input UpdateOrganizationInput { + filter: OrganizationFilter! + set: OrganizationPatch + remove: OrganizationPatch +} + +type UpdateOrganizationPayload { + organization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + numUids: Int +} + +input UpdatePolicyDefinitionInput { + filter: PolicyDefinitionFilter! + set: PolicyDefinitionPatch + remove: PolicyDefinitionPatch +} + +type UpdatePolicyDefinitionPayload { + policyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + numUids: Int +} + +input UpdatePolicyEnforcementInput { + filter: PolicyEnforcementFilter! + set: PolicyEnforcementPatch + remove: PolicyEnforcementPatch +} + +type UpdatePolicyEnforcementPayload { + policyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + numUids: Int +} + +input UpdateRBACInput { + filter: RBACFilter! + set: RBACPatch + remove: RBACPatch +} + +type UpdateRBACPayload { + rBAC(filter: RBACFilter, first: Int, offset: Int): [RBAC] + numUids: Int +} + +input UpdateRoleInput { + filter: RoleFilter! + set: RolePatch + remove: RolePatch +} + +type UpdateRolePayload { + role(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + numUids: Int +} + +input UpdateRunHistoryInput { + filter: RunHistoryFilter! + set: RunHistoryPatch + remove: RunHistoryPatch +} + +type UpdateRunHistoryPayload { + runHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + numUids: Int +} + +input UpdateSchemaVersionInput { + filter: SchemaVersionFilter! 
+ set: SchemaVersionPatch + remove: SchemaVersionPatch +} + +type UpdateSchemaVersionPayload { + schemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + numUids: Int +} + +input UpdateSecurityIssueInput { + filter: SecurityIssueFilter! + set: SecurityIssuePatch + remove: SecurityIssuePatch +} + +type UpdateSecurityIssuePayload { + securityIssue(filter: SecurityIssueFilter, order: SecurityIssueOrder, first: Int, offset: Int): [SecurityIssue] + numUids: Int +} + +input UpdateSourceCodeToolInput { + filter: SourceCodeToolFilter! + set: SourceCodeToolPatch + remove: SourceCodeToolPatch +} + +type UpdateSourceCodeToolPayload { + sourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + numUids: Int +} + +input UpdateTagInput { + filter: TagFilter! + set: TagPatch + remove: TagPatch +} + +type UpdateTagPayload { + tag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + numUids: Int +} + +input UpdateTeamInput { + filter: TeamFilter! + set: TeamPatch + remove: TeamPatch +} + +type UpdateTeamPayload { + team(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + numUids: Int +} + +input UpdateToolsUsedInput { + filter: ToolsUsedFilter! + set: ToolsUsedPatch + remove: ToolsUsedPatch +} + +type UpdateToolsUsedPayload { + toolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + numUids: Int +} + +input UpdateVulnerabilityInput { + filter: VulnerabilityFilter! + set: VulnerabilityPatch + remove: VulnerabilityPatch +} + +type UpdateVulnerabilityPayload { + vulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + numUids: Int +} + +type Vulnerability { + id: String! + parent: String! + ratings: Severity + cwes(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE!] 
+ summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: Float + priority: String + epss: Float + cisa_kev: String + affects(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component!] + cwesAggregate(filter: CWEFilter): CWEAggregateResult + affectsAggregate(filter: ComponentFilter): ComponentAggregateResult +} + +type VulnerabilityAggregateResult { + count: Int + idMin: String + idMax: String + parentMin: String + parentMax: String + summaryMin: String + summaryMax: String + detailMin: String + detailMax: String + recommendationMin: String + recommendationMax: String + publishedMin: DateTime + publishedMax: DateTime + modifiedMin: DateTime + modifiedMax: DateTime + createdAtMin: DateTime + createdAtMax: DateTime + cvssMin: Float + cvssMax: Float + cvssSum: Float + cvssAvg: Float + priorityMin: String + priorityMax: String + epssMin: Float + epssMax: Float + epssSum: Float + epssAvg: Float + cisa_kevMin: String + cisa_kevMax: String +} + +input VulnerabilityFilter { + id: StringHashFilter + parent: StringExactFilter_StringRegExpFilter + ratings: Severity_exact + createdAt: DateTimeFilter + cvss: FloatFilter + priority: StringExactFilter_StringRegExpFilter + epss: FloatFilter + cisa_kev: StringExactFilter_StringRegExpFilter + has: [VulnerabilityHasFilter] + and: [VulnerabilityFilter] + or: [VulnerabilityFilter] + not: VulnerabilityFilter +} + +enum VulnerabilityHasFilter { + id + parent + ratings + cwes + summary + detail + recommendation + published + modified + createdAt + cvss + priority + epss + cisa_kev + affects +} + +input VulnerabilityOrder { + asc: VulnerabilityOrderable + desc: VulnerabilityOrderable + then: VulnerabilityOrder +} + +enum VulnerabilityOrderable { + id + parent + summary + detail + recommendation + published + modified + createdAt + cvss + priority + epss + cisa_kev +} + +input VulnerabilityPatch { + parent: String + ratings: Severity + 
cwes: [CWERef!] + summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: Float + priority: String + epss: Float + cisa_kev: String + affects: [ComponentRef!] +} + +input VulnerabilityRef { + id: String + parent: String + ratings: Severity + cwes: [CWERef!] + summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: Float + priority: String + epss: Float + cisa_kev: String + affects: [ComponentRef!] +} + +input WithinFilter { + polygon: PolygonRef! +} + diff --git a/featureTable/scripts.go b/featureTable/scripts.go new file mode 100644 index 0000000..ab72b8f --- /dev/null +++ b/featureTable/scripts.go @@ -0,0 +1,96 @@ +package featuretable + +type FeatureTable struct { + Id string + Enabled bool + Type string + Scan string + Category string +} + +var allRecords = []FeatureTable{ + { + Id: "1", + Enabled: true, + Type: "trivy", + Scan: "licensescan", + Category: "scanningtool", + }, + { + Id: "2", + Enabled: true, + Type: "trivy", + Scan: "vulnerabilityscan", + Category: "scanningtool", + }, + { + Id: "3", + Enabled: true, + Type: "trivy", + Scan: "helmscan", + Category: "scanningtool", + }, + { + Id: "4", + Enabled: true, + Type: "trivy", + Scan: "secretscanforsource", + Category: "scanningtool", + }, + { + Id: "5", + Enabled: true, + Type: "trivy", + Scan: "secretscanforcontainers", + Category: "scanningtool", + }, + { + Id: "6", + Enabled: true, + Type: "openssf", + Scan: "compliancescan", + Category: "scanningtool", + }, + { + Id: "7", + Enabled: false, + Type: "semgrep", + Scan: "sastdastscan", + Category: "scanningtool", + }, + { + Id: "8", + Enabled: true, + Type: "kubescape", + Scan: "cisbenchmarkscan", + Category: "scanningtool", + }, + { + Id: "9", + Enabled: true, + Type: "kubescape", + Scan: "mitreandatt&ckscan", + Category: "scanningtool", + }, + { + Id: "10", + Enabled: true, + Type: "kubescape", + Scan: 
"nsa-cisascan", + Category: "scanningtool", + }, + { + Id: "11", + Enabled: true, + Type: "trivy", + Scan: "licensescanforsource", + Category: "scanningtool", + }, + { + Id: "12", + Enabled: false, + Type: "snyk", + Scan: "sastnykscan", + Category: "scanningtool", + }, +} diff --git a/featureTable/upgradeSteps.go b/featureTable/upgradeSteps.go new file mode 100644 index 0000000..141971e --- /dev/null +++ b/featureTable/upgradeSteps.go @@ -0,0 +1,42 @@ +package featuretable + +import ( + "context" + "fmt" + "time" + "upgradationScript/logger" + + "github.com/Khan/genqlient/graphql" +) + +func FeatTableUpgradeSteps(graphqlClient graphql.Client, orgId string) error { + + logger.Sl.Debugf("---------------------Starting Feature Table ingestion---------------------") + + for i, eachFeatRec := range allRecords { + + logger.Sl.Debugf("Starting Feature Table ingestion for iteration: %d", i) + + exists, err := checkIfFeatureRecordExists(context.Background(), graphqlClient, eachFeatRec.Type, eachFeatRec.Scan) + if err != nil { + return fmt.Errorf("checkIfFeatureRecordExists: iter: %d error: %s", i, err.Error()) + } + + if len(exists.QueryFeatureMode) != 0 { + logger.Sl.Debugf("Record for iter: %d already exists", i) + continue + } + + now := time.Now() + if _, err := addNewRecordFeatureTable(context.Background(), graphqlClient, eachFeatRec.Id, orgId, eachFeatRec.Scan, eachFeatRec.Type, eachFeatRec.Category, &eachFeatRec.Enabled, &now); err != nil { + return fmt.Errorf("addNewRecordFeatureTable: iter: %d error: %s", i, err.Error()) + } + + logger.Sl.Debugf("Added into Feature Table for iteration: %d", i) + + } + + logger.Sl.Debugf("---------------------Completed Feature Table ingestion---------------------") + return nil + +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..43e3f03 --- /dev/null +++ b/go.mod @@ -0,0 +1,42 @@ +module upgradationScript + +go 1.22.0 + +require ( + github.com/Khan/genqlient v0.7.0 + github.com/OpsMx/go-app-base v0.0.24 + 
github.com/OpsMx/ssd-jwt-auth v0.5.1 + github.com/aws/aws-sdk-go-v2 v1.30.3 + github.com/aws/aws-sdk-go-v2/config v1.27.26 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7 + github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 + github.com/aws/smithy-go v1.20.3 + github.com/vektah/gqlparser v1.3.1 + go.uber.org/zap v1.27.0 + gopkg.in/yaml.v2 v2.4.0 +) + +require ( + github.com/agnivade/levenshtein v1.1.1 // indirect + github.com/alexflint/go-arg v1.4.2 // indirect + github.com/alexflint/go-scalar v1.0.0 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.26 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect + github.com/vektah/gqlparser/v2 v2.5.11 // indirect + go.uber.org/multierr v1.10.0 // indirect + golang.org/x/mod v0.15.0 // indirect + golang.org/x/tools v0.18.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..9375160 --- /dev/null +++ b/go.sum @@ -0,0 +1,98 @@ +github.com/Khan/genqlient v0.7.0 h1:GZ1meyRnzcDTK48EjqB8t3bcfYvHArCUUvgOwpz1D4w= +github.com/Khan/genqlient v0.7.0/go.mod 
h1:HNyy3wZvuYwmW3Y7mkoQLZsa/R5n5yIRajS1kPBvSFM= +github.com/OpsMx/go-app-base v0.0.24 h1:ToKpkiVNk803DUFz0VEyi2r/ZUiZN1seNsIY4zbGxPs= +github.com/OpsMx/go-app-base v0.0.24/go.mod h1:jKQSuIBTo9OcWOK1XC91jr7cbcEQHUU8FojDHj+nf0g= +github.com/OpsMx/ssd-jwt-auth v0.5.1 h1:K25NdPBVFDsLFkkCFH+aOpFMKsRVzYlKX9+37M3K3mA= +github.com/OpsMx/ssd-jwt-auth v0.5.1/go.mod h1:mKiyZ8p8CEvB+f1SBDOzOTtnRmhrz5kbUdp6g93cF+0= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= +github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= +github.com/alexflint/go-arg v1.4.2 h1:lDWZAXxpAnZUq4qwb86p/3rIJJ2Li81EoMbTMujhVa0= +github.com/alexflint/go-arg v1.4.2/go.mod h1:9iRbDxne7LcR/GSvEr7ma++GLpdIU1zrghf2y2768kM= +github.com/alexflint/go-scalar v1.0.0 h1:NGupf1XV/Xb04wXskDFzS0KWOLH632W/EO4fAFi+A70= +github.com/alexflint/go-scalar v1.0.0/go.mod h1:GpHzbCOZXEKMEcygYQ5n/aa4Aq84zbxjy3MxYW0gjYw= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= +github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= +github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY= +github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 h1:tW1/Rkad38LA15X4UQtjXZXNKsCgkshC3EbmcUmghTg= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM= +github.com/aws/aws-sdk-go-v2/config v1.27.26 
h1:T1kAefbKuNum/AbShMsZEro6eRkeOT8YILfE9wyjAYQ= +github.com/aws/aws-sdk-go-v2/config v1.27.26/go.mod h1:ivWHkAWFrw/nxty5Fku7soTIVdqZaZ7dw+tc5iGW3GA= +github.com/aws/aws-sdk-go-v2/credentials v1.17.26 h1:tsm8g/nJxi8+/7XyJJcP2dLrnK/5rkFp6+i2nhmz5fk= +github.com/aws/aws-sdk-go-v2/credentials v1.17.26/go.mod h1:3vAM49zkIa3q8WT6o9Ve5Z0vdByDMwmdScO0zvThTgI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7 h1:kNemAUX+bJFBSfPkGVZ8HFOKIadjLoI2Ua1ZKivhGSo= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7/go.mod h1:71S2C1g/Zjn+ANmyoOqJ586OrPF9uC9iiHt9ZAT+MOw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 h1:YPYe6ZmvUfDDDELqEKtAd6bo8zxhkm+XEFEzQisqUIE= 
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17/go.mod h1:oBtcnYua/CgzCWYN7NZ5j7PotFDaFSUjCYVTtfyn7vw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 h1:HGErhhrxZlQ044RiM+WdoZxp0p+EGM62y3L6pwA4olE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 h1:246A4lSTXWJw/rmlQI+TT2OcqeDMKBdyjEQrafMaQdA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15/go.mod h1:haVfg3761/WF7YPuJOER2MP0k4UAXyHaLclKXB6usDg= +github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 h1:sZXIzO38GZOU+O0C+INqbH7C2yALwfMWpd64tONS/NE= +github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2/go.mod h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 h1:Fv1vD2L65Jnp5QRsdiM64JvUM4Xe+E0JyVsRQKv6IeA= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.3/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ= +github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE= +github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/bradleyjkemp/cupaloy/v2 v2.6.0 h1:knToPYa2xtfg42U3I6punFEjaGFKWQRXJwj0JTv4mTs= +github.com/bradleyjkemp/cupaloy/v2 v2.6.0/go.mod h1:bm7JXdkRd4BHJk9HpwqAI8BoAY1lps46Enkdqw6aRX0= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= +github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/vektah/gqlparser v1.3.1 h1:8b0IcD3qZKWJQHSzynbDlrtP3IxVydZ2DZepCGofqfU= +github.com/vektah/gqlparser v1.3.1/go.mod h1:bkVf0FX+Stjg/MHnm8mEyubuaArhNEqfQhF+OTiAL74= +github.com/vektah/gqlparser/v2 v2.5.11 h1:JJxLtXIoN7+3x6MBdtIP59TP1RANnY7pXOaDnADQSf8= +github.com/vektah/gqlparser/v2 v2.5.11/go.mod h1:1rCcfwB2ekJofmluGWXMSEnPMZgbxzwj6FaZ/4OT8Cc= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= +go.uber.org/multierr v1.10.0/go.mod 
h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/graphqlFunc/adminQueries.go b/graphqlFunc/adminQueries.go new file mode 100644 index 0000000..3c11870 --- /dev/null +++ b/graphqlFunc/adminQueries.go @@ -0,0 +1,253 @@ +package graphqlfunc + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net/http" + "strings" + "time" + + "github.com/Khan/genqlient/graphql" + "github.com/vektah/gqlparser/gqlerror" +) + +type DgraphResponse struct { + Data struct { + GetGQLSchema struct { + Schema string `json:"schema,omitempty" yaml:"schema,omitempty"` + } `json:"getGQLSchema,omitempty" yaml:"getGQLSchema,omitempty"` + Backup 
struct { + TaskId string `json:"taskId,omitempty" yaml:"taskId,omitempty"` + } `json:"backup,omitempty" yaml:"backup,omitempty"` + Restore struct { + TaskId string `json:"taskId,omitempty" yaml:"taskId,omitempty"` + } `json:"restore,omitempty" yaml:"restore,omitempty"` + Task struct { + Status string `json:"status,omitempty" yaml:"status,omitempty"` + } `json:"task,omitempty" yaml:"task,omitempty"` + } `json:"data,omitempty" yaml:"data,omitempty"` + Extensions map[string]interface{} `json:"extensions,omitempty" yaml:"extensions,omitempty"` + Errors gqlerror.List `json:"errors,omitempty" yaml:"errors,omitempty"` +} + +func DgraphAdminCall(dgraphUrl string, body []byte) (DgraphResponse, error) { + dgraphUrl = fmt.Sprintf("%s/admin", dgraphUrl) + httpClient := http.Client{} + httpReq, err := http.NewRequest( + http.MethodPost, + dgraphUrl, + bytes.NewReader(body), + ) + if err != nil { + return DgraphResponse{}, errors.New("DgraphAdminCall: http.NewRequest: error: " + err.Error()) + } + + httpReq.Header.Set("Content-Type", "application/json") + + httpResp, err := httpClient.Do(httpReq) + if err != nil { + return DgraphResponse{}, errors.New("DgraphAdminCall: httpClient.Do: error: " + err.Error()) + } + defer httpResp.Body.Close() + + if httpResp.StatusCode != http.StatusOK { + var respBody []byte + respBody, err = io.ReadAll(httpResp.Body) + if err != nil { + respBody = []byte(fmt.Sprintf("", err)) + } + return DgraphResponse{}, fmt.Errorf("DgraphAdminCall: returned error %v: %s", httpResp.Status, respBody) + } + + responseBytes, err := io.ReadAll(httpResp.Body) + if err != nil { + return DgraphResponse{}, errors.New("DgraphAdminCall: io.ReadAll: error: " + err.Error()) + } + + var resp DgraphResponse + err = json.Unmarshal(responseBytes, &resp) + if err != nil { + return DgraphResponse{}, fmt.Errorf("DgraphAdminCall: json.Unmarshal error %v: %s", err, string(responseBytes)) + } + + if len(resp.Errors) > 0 { + return DgraphResponse{}, fmt.Errorf("DgraphAdminCall: 
resp.Errors error %v", resp.Errors) + } + + return resp, nil +} + +func RetrieveSchema(dgraphUrl string) (string, error) { + QueryGetGQLSchema_Operation := ` + query GetGQLSchema { + getGQLSchema { + schema + } + } + ` + + req := &graphql.Request{ + OpName: "GetGQLSchema", + Query: QueryGetGQLSchema_Operation, + } + + body, err := json.Marshal(req) + if err != nil { + return "", fmt.Errorf("RetrieveSchema: dgraphUrl: %s json.Marshal: error: %s", dgraphUrl, err.Error()) + } + + resp, err := DgraphAdminCall(dgraphUrl, body) + if err != nil { + return "", fmt.Errorf("RetrieveSchema: dgraphUrl: %s error: %s", dgraphUrl, err.Error()) + } + + return resp.Data.GetGQLSchema.Schema, nil +} + +func setDrainingModeFalse(dgraphUrl string) error { + drainingQuery := `mutation Draining { + draining(enable: false) { + response { + message + } + } + }` + + req := &graphql.Request{ + OpName: "Draining", + Query: drainingQuery, + } + body, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("setDrainingModeFalse: json.Marshal: error: %s", err.Error()) + } + + if _, err := DgraphAdminCall(dgraphUrl, body); err != nil { + return fmt.Errorf("setDrainingModeFalse dgraphUrl: %s error: %s", dgraphUrl, err.Error()) + } + + return nil +} + +func generateDgraphBkp(dgraphUrl string) error { + + bkpQuery := `mutation Backup { + backup(input: { destination: "/dgraph/bkp", forceFull: true }) { + taskId + } + }` + + req := &graphql.Request{ + OpName: "Backup", + Query: bkpQuery, + } + body, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("GenerateDgraphBkp: json.Marshal: error: %s", err.Error()) + } + + resp, err := DgraphAdminCall(dgraphUrl, body) + if err != nil { + return fmt.Errorf("GenerateDgraphBkp: backup init dgraphUrl: %s error: %s", dgraphUrl, err.Error()) + } + + taskId := resp.Data.Backup.TaskId + + taskQuery := ` + query Task { + task(input: { id: "%s" }) { + status + } + } + ` + + for { + req := &graphql.Request{ + OpName: "Task", + Query: 
fmt.Sprintf(taskQuery, taskId), + } + body, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("GenerateDgraphBkp: task json.Marshal: error: %s", err.Error()) + } + + resp, err := DgraphAdminCall(dgraphUrl, body) + if err != nil { + return fmt.Errorf("GenerateDgraphBkp: backup task poll dgraphUrl: %s error: %s", dgraphUrl, err.Error()) + } + + if strings.EqualFold(resp.Data.Task.Status, "Success") { + break + } + time.Sleep(1 * time.Minute) + } + + return nil +} + +type SchemaResult struct { + Errors []SchemaResultError `json:"errors,omitempty" yaml:"errors,omitempty"` +} + +type SchemaResultError struct { + Message string `json:"message,omitempty" yaml:"message,omitempty"` +} + +func UpdateSchema(url, authToken string, schema []byte) error { + + if err := setDrainingModeFalse(url); err != nil { + return err + } + + ctx := context.Background() + + req, err := makeRequest(ctx, url+"/admin/schema", http.MethodPost, authToken, bytes.NewReader(schema)) + if err != nil { + return err + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + return fmt.Errorf("submit returned status %d", resp.StatusCode) + } + + r, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + var schemaResult SchemaResult + err = json.Unmarshal(r, &schemaResult) + if err != nil { + return err + } + + if len(schemaResult.Errors) != 0 { + fmt.Println() + for _, e := range schemaResult.Errors { + log.Printf("ERROR: %s", e.Message) + } + return fmt.Errorf("submit returned errors") + } + + return nil +} + +func makeRequest(ctx context.Context, url string, method string, authToken string, data io.Reader) (*http.Request, error) { + req, err := http.NewRequestWithContext(ctx, method, url, data) + if err != nil { + return nil, err + } + if authToken != "" { + req.Header.Add("X-Dgraph-AuthToken", authToken) + } + return req, err +} diff --git a/graphqlFunc/backup.go b/graphqlFunc/backup.go new file 
mode 100644 index 0000000..bfffe76 --- /dev/null +++ b/graphqlFunc/backup.go @@ -0,0 +1,196 @@ +package graphqlfunc + +import ( + "archive/tar" + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "time" + "upgradationScript/logger" +) + +const bucketName = "dgraph-backup" + +func BackupAndRestoreDgraph(dgraphUrl, restoreServiceUrl string) error { + + logger.Logger.Info("----------Backup&Restore Process Begin-------------------") + + s3Url, found := os.LookupEnv("S3_ENDPOINT_URL") + if !found { + return fmt.Errorf("envar S3_ENDPOINT_URL is not set") + } + + if _, found := os.LookupEnv("AWS_ACCESS_KEY_ID"); !found { + return fmt.Errorf("envar AWS_ACCESS_KEY_ID is not set") + } + + if _, found = os.LookupEnv("AWS_SECRET_ACCESS_KEY"); !found { + return fmt.Errorf("envar AWS_SECRET_ACCESS_KEY is not set") + } + + if err := generateDgraphBkp(dgraphUrl); err != nil { + return err + } + + now := time.Now().UTC() + unixTimestamp := now.Unix() + formattedTime := now.Format("02-01-2006") + "-" + fmt.Sprint(unixTimestamp) + fileName := fmt.Sprintf("bkp-%v.tar.gz", formattedTime) + + filePath := fmt.Sprintf("/app/scanResult/%s", fileName) + + if err := tarBkpFile("/app/dgraph/bkp", filePath); err != nil { + return fmt.Errorf("tarBkpFile: error: %s", err.Error()) + } + + if err := uploadBkpFile(s3Url, fileName); err != nil { + return fmt.Errorf("%s", err.Error()) + } + + return restoreTheBkpFileInDgraph(fileName, restoreServiceUrl) + +} + +func tarBkpFile(source, target string) error { + + // Create the output file + outFile, err := os.Create(target) + if err != nil { + return fmt.Errorf("could not create target file: %v", err) + } + defer outFile.Close() + + // Create a gzip writer + gzWriter := gzip.NewWriter(outFile) + defer gzWriter.Close() + + // Create a tar writer + tarWriter := tar.NewWriter(gzWriter) + defer tarWriter.Close() + + // Walk through the source directory and add files to the tar + err = 
filepath.Walk(source, func(fileName string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + // Get file header + header, err := tar.FileInfoHeader(fi, fi.Name()) + if err != nil { + return err + } + + // Update the name to maintain directory structure + header.Name, err = filepath.Rel(filepath.Dir(source), fileName) + if err != nil { + return err + } + + // Write the header + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + + // If it's a file, write its content + if !fi.Mode().IsRegular() { + return nil + } + file, err := os.Open(fileName) + if err != nil { + return err + } + defer file.Close() + + if _, err := io.Copy(tarWriter, file); err != nil { + return err + } + + return nil + }) + + if err != nil { + return fmt.Errorf("error walking through source directory: %v", err) + } + + return os.RemoveAll(source) + +} + +func uploadBkpFile(s3Url, fileName string) error { + s3client, err := MakeS3Client(context.TODO(), s3Url) + if err != nil { + err = fmt.Errorf("uploadBkpFile: unable to make s3 client %s", err.Error()) + return err + } + + key := s3client.MakeS3Key(bucketName, "backups", fileName) + + bkpPath := fmt.Sprintf("/app/scanResult/%s", fileName) + bkpFile, err := os.Open(bkpPath) + if err != nil { + return fmt.Errorf("uploadBkpFile: unable to open bkp file %s", err.Error()) + } + + if err := s3client.upload(context.TODO(), bucketName, key, bkpFile); err != nil { + return fmt.Errorf("uploadBkpFile: unable to upload bkp file %s error: %s", key, err.Error()) + } + + return os.Remove(bkpPath) +} + +func restoreTheBkpFileInDgraph(fileName, restoreServiceUrl string) error { + restoreApi := "/api/v1/restore" + + restoreUrl, err := url.JoinPath(restoreServiceUrl, restoreApi) + if err != nil { + return fmt.Errorf("restoreTheBkpFileInDgraph: error: %s", err.Error()) + } + + httpclient := &http.Client{} + req, err := http.NewRequest( + http.MethodGet, + restoreUrl, + nil, + ) + if err != nil { + return 
fmt.Errorf("restoreTheBkpFileInDgraph: NewRequest error: %s", err.Error()) + } + + query := req.URL.Query() + query.Set("file", fileName) + req.URL.RawQuery = query.Encode() + + resp, err := httpclient.Do(req) + if err != nil { + return fmt.Errorf("restoreTheBkpFileInDgraph: Do error: %s", err.Error()) + } + + if resp.StatusCode == http.StatusOK { + logger.Logger.Info("----------Backup&Restore Process Completed-------------------") + return nil + } + + type HttpError struct { + Error string `json:"error,omitempty"` + } + + defer resp.Body.Close() + responseBytes, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("restoreTheBkpFileInDgraph: ReadAll error: %s", err.Error()) + } + + var stdError HttpError + if err := json.Unmarshal(responseBytes, &stdError); err != nil { + return fmt.Errorf("restoreTheBkpFileInDgraph: Unmarshal error: %s", err.Error()) + } + + return fmt.Errorf("restoreTheBkpFileInDgraph: %s", stdError.Error) + +} diff --git a/graphqlFunc/genqlient.yaml b/graphqlFunc/genqlient.yaml new file mode 100644 index 0000000..b141854 --- /dev/null +++ b/graphqlFunc/genqlient.yaml @@ -0,0 +1,17 @@ +schema: schema.graphql +operations: +- queries.graphql +generated: schema-generated.go +package: graphqlfunc +use_struct_references: true +bindings: + Boolean: + type: "*bool" + DateTime: + type: "*time.Time" + Int64: + type: int64 + Int: + type: "*int" + ID: + type: "*string" diff --git a/graphqlFunc/graphqlClient.go b/graphqlFunc/graphqlClient.go new file mode 100644 index 0000000..666ac90 --- /dev/null +++ b/graphqlFunc/graphqlClient.go @@ -0,0 +1,36 @@ +package graphqlfunc + +import ( + "fmt" + "net/http" + "time" + + "github.com/Khan/genqlient/graphql" +) + +const OpsmxAuthHeader = "X-OpsMx-Auth" + +type authedTransport struct { + token string + wrapped http.RoundTripper +} + +func (t *authedTransport) RoundTrip(req *http.Request) (*http.Response, error) { + req.Header.Set(OpsmxAuthHeader, t.token) + return t.wrapped.RoundTrip(req) +} + +func 
NewClient(graphqlUrl, graphqlToken string) graphql.Client { + + graphqlUrl = fmt.Sprintf("%s/graphql", graphqlUrl) + + httpClient := http.Client{ + Timeout: 30 * time.Second, + Transport: &authedTransport{ + token: graphqlToken, + wrapped: http.DefaultTransport, + }, + } + + return graphql.NewClient(graphqlUrl, &httpClient) +} diff --git a/graphqlFunc/queries.graphql b/graphqlFunc/queries.graphql new file mode 100644 index 0000000..e53c765 --- /dev/null +++ b/graphqlFunc/queries.graphql @@ -0,0 +1,5 @@ +query GetOrgId { + queryOrganization { + id + } +} diff --git a/graphqlFunc/s3.go b/graphqlFunc/s3.go new file mode 100644 index 0000000..c239333 --- /dev/null +++ b/graphqlFunc/s3.go @@ -0,0 +1,107 @@ +package graphqlfunc + +import ( + "context" + "crypto/tls" + "fmt" + "net/http" + "net/url" + "os" + "path" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithyendpoints "github.com/aws/smithy-go/endpoints" +) + +type S3Client struct { + S3Client *s3.Client + endpoint *url.URL +} + +func CleanS3Name(name string) string { + return strings.ReplaceAll(name, ":", "-") +} + +func (*S3Client) MakeS3Key(bucket, prefix, name string) string { + keyparts := []string{bucket} + if prefix != "" { + keyparts = append(keyparts, prefix) + } + keyparts = append(keyparts, CleanS3Name(name)) + return path.Join(keyparts...) 
+} + +// Override the default endpoint resolver if needed +func (mgr *S3Client) ResolveEndpoint(ctx context.Context, params s3.EndpointParameters) (smithyendpoints.Endpoint, error) { + return smithyendpoints.Endpoint{ + URI: *mgr.endpoint, + }, nil +} + +func makeAWSConfig(ctx context.Context) (aws.Config, error) { + httpClient := awshttp.NewBuildableClient().WithTransportOptions(func(tr *http.Transport) { + if tr.TLSClientConfig == nil { + tr.TLSClientConfig = &tls.Config{} + } + tr.TLSClientConfig.MinVersion = tls.VersionTLS13 + }) + + return config.LoadDefaultConfig(ctx, + config.WithHTTPClient(httpClient), + config.WithClientLogMode(aws.LogDeprecatedUsage), + ) +} + +func MakeS3Client(ctx context.Context, endpoint string) (*S3Client, error) { + + cfg, err := makeAWSConfig(ctx) + if err != nil { + err = fmt.Errorf("MakeS3Client: unable to make aws config %s", err.Error()) + return nil, err + } + + client := &S3Client{} + + uri, err := url.Parse(endpoint) + if err != nil { + return nil, err + } + client.endpoint = uri + + s3Client := s3.NewFromConfig(cfg, func(o *s3.Options) { + if client.endpoint != nil { + o.EndpointResolverV2 = client + } + }) + client.S3Client = s3Client + + return client, nil +} + +func (u *S3Client) upload(ctx context.Context, bucketName, key string, outfile *os.File) error { + + partMBS := int64(10) + if _, err := outfile.Seek(0, 0); err != nil { + return err + } + + uploader := manager.NewUploader(u.S3Client, func(u *manager.Uploader) { + u.PartSize = partMBS * 1024 * 1024 + }) + + _, err := uploader.Upload(ctx, &s3.PutObjectInput{ + ACL: types.ObjectCannedACLBucketOwnerFullControl, + Bucket: aws.String(bucketName), + Key: aws.String(key), + Body: outfile, + Tagging: aws.String("ItemType=docker-image"), + ContentType: aws.String("application/tar+gzip"), + }) + return err +} diff --git a/graphqlFunc/schema-generated.go b/graphqlFunc/schema-generated.go new file mode 100644 index 0000000..29bc8ab --- /dev/null +++ 
b/graphqlFunc/schema-generated.go @@ -0,0 +1,59 @@ +// Code generated by github.com/Khan/genqlient, DO NOT EDIT. + +package graphqlfunc + +import ( + "context" + + "github.com/Khan/genqlient/graphql" +) + +// GetOrgIdQueryOrganization includes the requested fields of the GraphQL type Organization. +type GetOrgIdQueryOrganization struct { + // id is randomly assigned + Id string `json:"id"` +} + +// GetId returns GetOrgIdQueryOrganization.Id, and is useful for accessing the field via an interface. +func (v *GetOrgIdQueryOrganization) GetId() string { return v.Id } + +// GetOrgIdResponse is returned by GetOrgId on success. +type GetOrgIdResponse struct { + QueryOrganization []*GetOrgIdQueryOrganization `json:"queryOrganization"` +} + +// GetQueryOrganization returns GetOrgIdResponse.QueryOrganization, and is useful for accessing the field via an interface. +func (v *GetOrgIdResponse) GetQueryOrganization() []*GetOrgIdQueryOrganization { + return v.QueryOrganization +} + +// The query or mutation executed by GetOrgId. +const GetOrgId_Operation = ` +query GetOrgId { + queryOrganization { + id + } +} +` + +func GetOrgId( + ctx_ context.Context, + client_ graphql.Client, +) (*GetOrgIdResponse, error) { + req_ := &graphql.Request{ + OpName: "GetOrgId", + Query: GetOrgId_Operation, + } + var err_ error + + var data_ GetOrgIdResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} diff --git a/graphqlFunc/schema.graphql b/graphqlFunc/schema.graphql new file mode 100644 index 0000000..183bbbd --- /dev/null +++ b/graphqlFunc/schema.graphql @@ -0,0 +1,4664 @@ +directive @secret(field: String!, pred: String) on OBJECT | INTERFACE + +directive @custom(http: CustomHTTP, dql: String) on FIELD_DEFINITION + +directive @lambdaOnMutate(add: Boolean, update: Boolean, delete: Boolean) on OBJECT | INTERFACE + +directive @hasInverse(field: String!) 
on FIELD_DEFINITION + +directive @search(by: [DgraphIndex!]) on FIELD_DEFINITION + +directive @dgraph(type: String, pred: String) on OBJECT | INTERFACE | FIELD_DEFINITION + +directive @auth(password: AuthRule, query: AuthRule, add: AuthRule, update: AuthRule, delete: AuthRule) on OBJECT | INTERFACE + +directive @remoteResponse(name: String) on FIELD_DEFINITION + +directive @cacheControl(maxAge: Int!) on QUERY + +directive @generate(query: GenerateQueryParams, mutation: GenerateMutationParams, subscription: Boolean) on OBJECT | INTERFACE + +directive @id(interface: Boolean) on FIELD_DEFINITION + +directive @withSubscription on OBJECT | INTERFACE | FIELD_DEFINITION + +directive @remote on OBJECT | INTERFACE | UNION | INPUT_OBJECT | ENUM + +directive @cascade(fields: [String]) on FIELD + +directive @lambda on FIELD_DEFINITION + +input AddApplicationDeploymentInput { + """id is randomly assigned""" + id: String! + artifact: [ArtifactRef!] + applicationEnvironment: ApplicationEnvironmentRef! + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage! + + """source is argo, spinnaker etc""" + source: String! + + """component would be a service""" + component: String! + + """user who deployed the artifact""" + deployedBy: String + toolsUsed: ToolsUsedRef! + deploymentRisk: ApplicationDeploymentRiskRef + policyRunHistory: [RunHistoryRef!] +} + +type AddApplicationDeploymentPayload { + applicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + numUids: Int +} + +input AddApplicationDeploymentRiskInput { + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment: ApplicationDeploymentRef! 
+} + +type AddApplicationDeploymentRiskPayload { + applicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + numUids: Int +} + +input AddApplicationEnvironmentInput { + """id is randomly assigned""" + id: String! + environment: EnvironmentRef + application: ApplicationRef! + deploymentTarget: DeploymentTargetRef! + namespace: String! + + """ + toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + """ + toolsUsed: [String!] + deployments: [ApplicationDeploymentRef!] + riskStatus: ApplicationRiskStatusRef + metadata: [KeyValueRef!] +} + +type AddApplicationEnvironmentPayload { + applicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + numUids: Int +} + +input AddApplicationInput { + """id is randomly assigned""" + id: String! + name: String! + roles: [RoleRef!] + environments: [ApplicationEnvironmentRef!] + team: TeamRef! + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + metadata: [KeyValueRef!] +} + +type AddApplicationPayload { + application(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + numUids: Int +} + +input AddApplicationRiskStatusInput { + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime! + updatedAt: DateTime! + applicationEnvironment: ApplicationEnvironmentRef! +} + +type AddApplicationRiskStatusPayload { + applicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + numUids: Int +} + +input AddArtifactInput { + id: String! + artifactType: String! + artifactName: String! + artifactTag: String! + artifactSha: String! + scanData: [ArtifactScanDataRef!] 
+ artifactDeployment: [ApplicationDeploymentRef!] + buildDetails: BuildToolRef +} + +type AddArtifactPayload { + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + numUids: Int +} + +input AddArtifactScanDataInput { + id: String! + artifactSha: String! + tool: String! + artifactDetails: ArtifactRef + lastScannedAt: DateTime + createdAt: DateTime + vulnTrackingId: String + components: [ComponentRef!] + vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int + sbomUrl: String + artifactLicenseScanUrl: String + artifactSecretScanUrl: String + sourceLicenseScanUrl: String + sourceSecretScanUrl: String + sourceScorecardScanUrl: String + sourceSemgrepHighSeverityScanUrl: String + sourceSemgrepMediumSeverityScanUrl: String + sourceSemgrepLowSeverityScanUrl: String + sourceSnykScanUrl: String + virusTotalUrlScan: String + riskStatus: RiskStatus + artifactRunHistory: [RunHistoryRef!] +} + +type AddArtifactScanDataPayload { + artifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + numUids: Int +} + +input AddBuildToolInput { + """id is randomly assigned""" + id: String! + + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String! + + """tool is jenkins etc""" + tool: String! + + """buildName is the name of the job/pipeline/action""" + buildName: String! + buildUrl: String! + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String! + + """artifactTag would be the tag of the artifact""" + artifactTag: String! + + """digest is the sha of the artifact""" + digest: String! 
+ + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + artifactNode: ArtifactRef + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + sourceCodeTool: SourceCodeToolRef + commitMetaData: [CommitMetaDataRef!] + createdAt: DateTime! +} + +type AddBuildToolPayload { + buildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + numUids: Int +} + +input AddCommitMetaDataInput { + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool: BuildToolRef! +} + +type AddCommitMetaDataPayload { + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + numUids: Int +} + +input AddComponentInput { + id: String! + type: String! + name: String! + version: String! + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities: [VulnerabilityRef!] + artifacts: [ArtifactScanDataRef!] +} + +type AddComponentPayload { + component(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + numUids: Int +} + +input AddCredentialsInput { + data: String! + integrator: IntegratorRef! +} + +type AddCredentialsPayload { + credentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + numUids: Int +} + +input AddCWEInput { + id: String! + name: String! + description: String +} + +type AddCWEPayload { + cWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + numUids: Int +} + +input AddDeploymentTargetInput { + """id is randomly assigned""" + id: String! + name: String! 
+ + """this would be the ip/server address of the target environment""" + ip: String! + account: String + + """this would be something like aws, gcp etc""" + targetType: String + + """this would be something like us-east-1 etc""" + region: String + kubescapeServiceConnected: String + isFirewall: Boolean + organization: OrganizationRef! + defaultEnvironment: EnvironmentRef! +} + +type AddDeploymentTargetPayload { + deploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + numUids: Int +} + +input AddEnvironmentInput { + id: String! + organization: OrganizationRef! + purpose: String! +} + +type AddEnvironmentPayload { + environment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + numUids: Int +} + +input AddFeatureModeInput { + id: String! + organization: OrganizationRef! + scan: String! + type: String! + enabled: Boolean! + category: String! + createdAt: DateTime! + updatedAt: DateTime! +} + +type AddFeatureModePayload { + featureMode(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode] + numUids: Int +} + +input AddIntegratorInput { + id: String! + organization: OrganizationRef! + name: String! + type: String! + category: String! + credentials: CredentialsRef! + createdAt: DateTime! + updatedAt: DateTime! +} + +type AddIntegratorPayload { + integrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + numUids: Int +} + +input AddKeyValueInput { + id: String! + name: String! + value: String! +} + +type AddKeyValuePayload { + keyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + numUids: Int +} + +input AddOrganizationInput { + """id is randomly assigned""" + id: String! + name: String! + roles: [RoleRef!] + teams: [TeamRef!] + environments: [DeploymentTargetRef!] + policies: [PolicyDefinitionRef!] 
+ policyEnforcements: [PolicyEnforcementRef!] + integrators: [IntegratorRef!] + featureModes: [FeatureModeRef!] +} + +type AddOrganizationPayload { + organization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + numUids: Int +} + +input AddPolicyDefinitionInput { + id: String! + ownerOrg: OrganizationRef! + ownerTeam: TeamRef + ownerApplication: ApplicationRef + createdAt: DateTime! + updatedAt: DateTime! + policyName: String! + category: String! + stage: String! + description: String! + scheduledPolicy: Boolean! + script: String! + variables: String + conditionName: String + suggestion: String +} + +type AddPolicyDefinitionPayload { + policyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + numUids: Int +} + +input AddPolicyEnforcementInput { + policy: PolicyDefinitionRef! + enforcedOrg: OrganizationRef + enforcedTeam: TeamRef + enforcedApplication: ApplicationRef + status: Boolean! + forceApply: Boolean + severity: Severity! + datasourceTool: String! + action: String! + conditionValue: String + environments: [EnvironmentRef!] + tags: [TagRef!] + createdAt: DateTime! + updatedAt: DateTime! +} + +type AddPolicyEnforcementPayload { + policyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + numUids: Int +} + +input AddRoleInput { + """id is randomly assigned""" + id: String! + + """group should be a URI format that includes a scope or realm""" + group: String! + permission: RolePermission! +} + +type AddRolePayload { + role(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + numUids: Int +} + +input AddRunHistoryInput { + policyId: String! + applicationDeployment: ApplicationDeploymentRef + artifactScan: ArtifactScanDataRef + PolicyName: String! + Stage: String! + Artifact: String! + ArtifactTag: String! + ArtifactSha: String! + ArtifactNameTag: String! 
+ DatasourceTool: String! + CreatedAt: DateTime! + UpdatedAt: DateTime! + DeployedAt: DateTime! + Hash: String + Pass: Boolean! + MetaData: String + FileApi: String + scheduledPolicy: Boolean! + policyEnforcements: PolicyEnforcementRef! + securityIssue: SecurityIssueRef +} + +type AddRunHistoryPayload { + runHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + numUids: Int +} + +input AddSchemaVersionInput { + version: String! +} + +type AddSchemaVersionPayload { + schemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + numUids: Int +} + +input AddSecurityIssueInput { + AlertTitle: String + AlertMessage: String + Suggestions: String + Severity: Severity! + CreatedAt: DateTime! + UpdatedAt: DateTime! + Action: String! + JiraUrl: String + Status: String! + Reason: String + Error: String + Affects: [RunHistoryRef!] +} + +type AddSecurityIssuePayload { + securityIssue(filter: SecurityIssueFilter, order: SecurityIssueOrder, first: Int, offset: Int): [SecurityIssue] + numUids: Int +} + +input AddSourceCodeToolInput { + """id is randomly assigned""" + id: String! + createdAt: DateTime! + + """scm is the scm tool github/gitlab etc""" + scm: String! + + """repository is the git remote repository""" + repository: String! + + """branch is the git branch on which the artifact was built""" + branch: String! + + """headCommit is the checkout out head commit""" + headCommit: String + + """ + diffCommits is a comma separated string of the commits between the previous built artifact and the current + """ + diffCommits: String + licenseName: String + visibility: String + workflowName: String + + """parentRepo is populated in case the git repo is a fork""" + parentRepo: String + buildTool: BuildToolRef! 
+} + +type AddSourceCodeToolPayload { + sourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + numUids: Int +} + +input AddTagInput { + id: String! + tagName: String! + tagValue: String! + tagDescription: String + createdBy: String + createdAt: DateTime! + updatedAt: DateTime! + policies: [PolicyEnforcementRef!] +} + +type AddTagPayload { + tag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + numUids: Int +} + +input AddTeamInput { + """id is randomly assigned""" + id: String! + name: String! + roles: [RoleRef!] + organization: OrganizationRef! + applications: [ApplicationRef!] + labels: [KeyValueRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] +} + +type AddTeamPayload { + team(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + numUids: Int +} + +input AddToolsUsedInput { + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +type AddToolsUsedPayload { + toolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + numUids: Int +} + +input AddVulnerabilityInput { + id: String! + parent: String! + ratings: Severity + cwes: [CWERef!] + summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: Float + priority: String + epss: Float + cisa_kev: String + affects: [ComponentRef!] +} + +type AddVulnerabilityPayload { + vulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + numUids: Int +} + +type Application implements RBAC { + """id is randomly assigned""" + id: String! + name: String! + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + environments(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment!] 
ApplicationDeployment tells us about the artifact deployed along with its associated details.
+ + """deploymentRisk is the risk status of the deployment""" + deploymentRisk(filter: ApplicationDeploymentRiskFilter): ApplicationDeploymentRisk + + """policyRunHistory is the policy execution history for this deployment""" + policyRunHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory!] + artifactAggregate(filter: ArtifactFilter): ArtifactAggregateResult + policyRunHistoryAggregate(filter: RunHistoryFilter): RunHistoryAggregateResult +} + +type ApplicationDeploymentAggregateResult { + count: Int + idMin: String + idMax: String + deployedAtMin: DateTime + deployedAtMax: DateTime + sourceMin: String + sourceMax: String + componentMin: String + componentMax: String + deployedByMin: String + deployedByMax: String +} + +input ApplicationDeploymentFilter { + id: StringHashFilter + deployedAt: DateTimeFilter + deploymentStage: DeploymentStage_exact + component: StringExactFilter_StringRegExpFilter + has: [ApplicationDeploymentHasFilter] + and: [ApplicationDeploymentFilter] + or: [ApplicationDeploymentFilter] + not: ApplicationDeploymentFilter +} + +enum ApplicationDeploymentHasFilter { + id + artifact + applicationEnvironment + deployedAt + deploymentStage + source + component + deployedBy + toolsUsed + deploymentRisk + policyRunHistory +} + +input ApplicationDeploymentOrder { + asc: ApplicationDeploymentOrderable + desc: ApplicationDeploymentOrderable + then: ApplicationDeploymentOrder +} + +enum ApplicationDeploymentOrderable { + id + deployedAt + source + component + deployedBy +} + +input ApplicationDeploymentPatch { + artifact: [ArtifactRef!] 
+ applicationEnvironment: ApplicationEnvironmentRef + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage + + """source is argo, spinnaker etc""" + source: String + + """component would be a service""" + component: String + + """user who deployed the artifact""" + deployedBy: String + toolsUsed: ToolsUsedRef + deploymentRisk: ApplicationDeploymentRiskRef + policyRunHistory: [RunHistoryRef!] +} + +input ApplicationDeploymentRef { + """id is randomly assigned""" + id: String + artifact: [ArtifactRef!] + applicationEnvironment: ApplicationEnvironmentRef + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage + + """source is argo, spinnaker etc""" + source: String + + """component would be a service""" + component: String + + """user who deployed the artifact""" + deployedBy: String + toolsUsed: ToolsUsedRef + deploymentRisk: ApplicationDeploymentRiskRef + policyRunHistory: [RunHistoryRef!] +} + +""" +ApplicationDeploymentRisk tells us about the risk status and alerts for different stages for an application deployment. +""" +type ApplicationDeploymentRisk { + id: ID! + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment(filter: ApplicationDeploymentFilter): ApplicationDeployment! 
+} + +type ApplicationDeploymentRiskAggregateResult { + count: Int + sourceCodeAlertsScoreMin: Int + sourceCodeAlertsScoreMax: Int + sourceCodeAlertsScoreSum: Int + sourceCodeAlertsScoreAvg: Float + buildAlertsScoreMin: Int + buildAlertsScoreMax: Int + buildAlertsScoreSum: Int + buildAlertsScoreAvg: Float + artifactAlertsScoreMin: Int + artifactAlertsScoreMax: Int + artifactAlertsScoreSum: Int + artifactAlertsScoreAvg: Float + deploymentAlertsScoreMin: Int + deploymentAlertsScoreMax: Int + deploymentAlertsScoreSum: Int + deploymentAlertsScoreAvg: Float +} + +input ApplicationDeploymentRiskFilter { + id: [ID!] + deploymentRiskStatus: RiskStatus_exact_StringRegExpFilter + has: [ApplicationDeploymentRiskHasFilter] + and: [ApplicationDeploymentRiskFilter] + or: [ApplicationDeploymentRiskFilter] + not: ApplicationDeploymentRiskFilter +} + +enum ApplicationDeploymentRiskHasFilter { + sourceCodeAlertsScore + buildAlertsScore + artifactAlertsScore + deploymentAlertsScore + deploymentRiskStatus + applicationDeployment +} + +input ApplicationDeploymentRiskOrder { + asc: ApplicationDeploymentRiskOrderable + desc: ApplicationDeploymentRiskOrderable + then: ApplicationDeploymentRiskOrder +} + +enum ApplicationDeploymentRiskOrderable { + sourceCodeAlertsScore + buildAlertsScore + artifactAlertsScore + deploymentAlertsScore +} + +input ApplicationDeploymentRiskPatch { + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment: ApplicationDeploymentRef +} + +input ApplicationDeploymentRiskRef { + id: ID + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment: ApplicationDeploymentRef +} + +""" +ApplicationEnvironment is a running instance of an application down to the level of a namespace or its non k8s equivalent. 
toolsUsed is a list of strings that contains all the tools(source, build, artifact, deploy etc) for an app env
toolsUsed is a list of strings that contains all the tools(source, build, artifact, deploy etc) for an app env
+ updatedAt: DateTime! + applicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironment! +} + +type ApplicationRiskStatusAggregateResult { + count: Int + sourceCodeAlertsMin: Int + sourceCodeAlertsMax: Int + sourceCodeAlertsSum: Int + sourceCodeAlertsAvg: Float + buildAlertsMin: Int + buildAlertsMax: Int + buildAlertsSum: Int + buildAlertsAvg: Float + artifactAlertsMin: Int + artifactAlertsMax: Int + artifactAlertsSum: Int + artifactAlertsAvg: Float + deploymentAlertsMin: Int + deploymentAlertsMax: Int + deploymentAlertsSum: Int + deploymentAlertsAvg: Float + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input ApplicationRiskStatusFilter { + id: [ID!] + riskStatus: RiskStatus_exact_StringRegExpFilter + has: [ApplicationRiskStatusHasFilter] + and: [ApplicationRiskStatusFilter] + or: [ApplicationRiskStatusFilter] + not: ApplicationRiskStatusFilter +} + +enum ApplicationRiskStatusHasFilter { + riskStatus + sourceCodeAlerts + buildAlerts + artifactAlerts + deploymentAlerts + createdAt + updatedAt + applicationEnvironment +} + +input ApplicationRiskStatusOrder { + asc: ApplicationRiskStatusOrderable + desc: ApplicationRiskStatusOrderable + then: ApplicationRiskStatusOrder +} + +enum ApplicationRiskStatusOrderable { + sourceCodeAlerts + buildAlerts + artifactAlerts + deploymentAlerts + createdAt + updatedAt +} + +input ApplicationRiskStatusPatch { + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime + updatedAt: DateTime + applicationEnvironment: ApplicationEnvironmentRef +} + +input ApplicationRiskStatusRef { + id: ID + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime + updatedAt: DateTime + applicationEnvironment: ApplicationEnvironmentRef +} + +type Artifact { + id: String! + artifactType: String! + artifactName: String! 
+ artifactTag: String! + artifactSha: String! + scanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData!] + artifactDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment!] + buildDetails(filter: BuildToolFilter): BuildTool + scanDataAggregate(filter: ArtifactScanDataFilter): ArtifactScanDataAggregateResult + artifactDeploymentAggregate(filter: ApplicationDeploymentFilter): ApplicationDeploymentAggregateResult +} + +type ArtifactAggregateResult { + count: Int + idMin: String + idMax: String + artifactTypeMin: String + artifactTypeMax: String + artifactNameMin: String + artifactNameMax: String + artifactTagMin: String + artifactTagMax: String + artifactShaMin: String + artifactShaMax: String +} + +input ArtifactFilter { + id: StringHashFilter + artifactType: StringExactFilter + artifactName: StringExactFilter_StringRegExpFilter + artifactTag: StringExactFilter_StringRegExpFilter + artifactSha: StringExactFilter + has: [ArtifactHasFilter] + and: [ArtifactFilter] + or: [ArtifactFilter] + not: ArtifactFilter +} + +enum ArtifactHasFilter { + id + artifactType + artifactName + artifactTag + artifactSha + scanData + artifactDeployment + buildDetails +} + +input ArtifactOrder { + asc: ArtifactOrderable + desc: ArtifactOrderable + then: ArtifactOrder +} + +enum ArtifactOrderable { + id + artifactType + artifactName + artifactTag + artifactSha +} + +input ArtifactPatch { + artifactType: String + artifactName: String + artifactTag: String + artifactSha: String + scanData: [ArtifactScanDataRef!] + artifactDeployment: [ApplicationDeploymentRef!] + buildDetails: BuildToolRef +} + +input ArtifactRef { + id: String + artifactType: String + artifactName: String + artifactTag: String + artifactSha: String + scanData: [ArtifactScanDataRef!] + artifactDeployment: [ApplicationDeploymentRef!] 
+ buildDetails: BuildToolRef +} + +type ArtifactScanData { + id: String! + artifactSha: String! + tool: String! + artifactDetails(filter: ArtifactFilter): Artifact + lastScannedAt: DateTime + createdAt: DateTime + vulnTrackingId: String + components(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component!] + vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int + sbomUrl: String + artifactLicenseScanUrl: String + artifactSecretScanUrl: String + sourceLicenseScanUrl: String + sourceSecretScanUrl: String + sourceScorecardScanUrl: String + sourceSemgrepHighSeverityScanUrl: String + sourceSemgrepMediumSeverityScanUrl: String + sourceSemgrepLowSeverityScanUrl: String + sourceSnykScanUrl: String + virusTotalUrlScan: String + riskStatus: RiskStatus + artifactRunHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory!] 
+ componentsAggregate(filter: ComponentFilter): ComponentAggregateResult + artifactRunHistoryAggregate(filter: RunHistoryFilter): RunHistoryAggregateResult +} + +type ArtifactScanDataAggregateResult { + count: Int + idMin: String + idMax: String + artifactShaMin: String + artifactShaMax: String + toolMin: String + toolMax: String + lastScannedAtMin: DateTime + lastScannedAtMax: DateTime + createdAtMin: DateTime + createdAtMax: DateTime + vulnTrackingIdMin: String + vulnTrackingIdMax: String + vulnCriticalCountMin: Int + vulnCriticalCountMax: Int + vulnCriticalCountSum: Int + vulnCriticalCountAvg: Float + vulnHighCountMin: Int + vulnHighCountMax: Int + vulnHighCountSum: Int + vulnHighCountAvg: Float + vulnMediumCountMin: Int + vulnMediumCountMax: Int + vulnMediumCountSum: Int + vulnMediumCountAvg: Float + vulnLowCountMin: Int + vulnLowCountMax: Int + vulnLowCountSum: Int + vulnLowCountAvg: Float + vulnInfoCountMin: Int + vulnInfoCountMax: Int + vulnInfoCountSum: Int + vulnInfoCountAvg: Float + vulnUnknownCountMin: Int + vulnUnknownCountMax: Int + vulnUnknownCountSum: Int + vulnUnknownCountAvg: Float + vulnNoneCountMin: Int + vulnNoneCountMax: Int + vulnNoneCountSum: Int + vulnNoneCountAvg: Float + vulnTotalCountMin: Int + vulnTotalCountMax: Int + vulnTotalCountSum: Int + vulnTotalCountAvg: Float + sbomUrlMin: String + sbomUrlMax: String + artifactLicenseScanUrlMin: String + artifactLicenseScanUrlMax: String + artifactSecretScanUrlMin: String + artifactSecretScanUrlMax: String + sourceLicenseScanUrlMin: String + sourceLicenseScanUrlMax: String + sourceSecretScanUrlMin: String + sourceSecretScanUrlMax: String + sourceScorecardScanUrlMin: String + sourceScorecardScanUrlMax: String + sourceSemgrepHighSeverityScanUrlMin: String + sourceSemgrepHighSeverityScanUrlMax: String + sourceSemgrepMediumSeverityScanUrlMin: String + sourceSemgrepMediumSeverityScanUrlMax: String + sourceSemgrepLowSeverityScanUrlMin: String + sourceSemgrepLowSeverityScanUrlMax: String + 
sourceSnykScanUrlMin: String + sourceSnykScanUrlMax: String + virusTotalUrlScanMin: String + virusTotalUrlScanMax: String +} + +input ArtifactScanDataFilter { + id: StringHashFilter + artifactSha: StringExactFilter + tool: StringExactFilter + vulnCriticalCount: IntFilter + vulnHighCount: IntFilter + vulnMediumCount: IntFilter + vulnLowCount: IntFilter + vulnInfoCount: IntFilter + vulnUnknownCount: IntFilter + vulnNoneCount: IntFilter + vulnTotalCount: IntFilter + riskStatus: RiskStatus_exact_StringRegExpFilter + has: [ArtifactScanDataHasFilter] + and: [ArtifactScanDataFilter] + or: [ArtifactScanDataFilter] + not: ArtifactScanDataFilter +} + +enum ArtifactScanDataHasFilter { + id + artifactSha + tool + artifactDetails + lastScannedAt + createdAt + vulnTrackingId + components + vulnCriticalCount + vulnHighCount + vulnMediumCount + vulnLowCount + vulnInfoCount + vulnUnknownCount + vulnNoneCount + vulnTotalCount + sbomUrl + artifactLicenseScanUrl + artifactSecretScanUrl + sourceLicenseScanUrl + sourceSecretScanUrl + sourceScorecardScanUrl + sourceSemgrepHighSeverityScanUrl + sourceSemgrepMediumSeverityScanUrl + sourceSemgrepLowSeverityScanUrl + sourceSnykScanUrl + virusTotalUrlScan + riskStatus + artifactRunHistory +} + +input ArtifactScanDataOrder { + asc: ArtifactScanDataOrderable + desc: ArtifactScanDataOrderable + then: ArtifactScanDataOrder +} + +enum ArtifactScanDataOrderable { + id + artifactSha + tool + lastScannedAt + createdAt + vulnTrackingId + vulnCriticalCount + vulnHighCount + vulnMediumCount + vulnLowCount + vulnInfoCount + vulnUnknownCount + vulnNoneCount + vulnTotalCount + sbomUrl + artifactLicenseScanUrl + artifactSecretScanUrl + sourceLicenseScanUrl + sourceSecretScanUrl + sourceScorecardScanUrl + sourceSemgrepHighSeverityScanUrl + sourceSemgrepMediumSeverityScanUrl + sourceSemgrepLowSeverityScanUrl + sourceSnykScanUrl + virusTotalUrlScan +} + +input ArtifactScanDataPatch { + artifactSha: String + tool: String + artifactDetails: ArtifactRef + 
lastScannedAt: DateTime + createdAt: DateTime + vulnTrackingId: String + components: [ComponentRef!] + vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int + sbomUrl: String + artifactLicenseScanUrl: String + artifactSecretScanUrl: String + sourceLicenseScanUrl: String + sourceSecretScanUrl: String + sourceScorecardScanUrl: String + sourceSemgrepHighSeverityScanUrl: String + sourceSemgrepMediumSeverityScanUrl: String + sourceSemgrepLowSeverityScanUrl: String + sourceSnykScanUrl: String + virusTotalUrlScan: String + riskStatus: RiskStatus + artifactRunHistory: [RunHistoryRef!] +} + +input ArtifactScanDataRef { + id: String + artifactSha: String + tool: String + artifactDetails: ArtifactRef + lastScannedAt: DateTime + createdAt: DateTime + vulnTrackingId: String + components: [ComponentRef!] + vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int + sbomUrl: String + artifactLicenseScanUrl: String + artifactSecretScanUrl: String + sourceLicenseScanUrl: String + sourceSecretScanUrl: String + sourceScorecardScanUrl: String + sourceSemgrepHighSeverityScanUrl: String + sourceSemgrepMediumSeverityScanUrl: String + sourceSemgrepLowSeverityScanUrl: String + sourceSnykScanUrl: String + virusTotalUrlScan: String + riskStatus: RiskStatus + artifactRunHistory: [RunHistoryRef!] +} + +input AuthRule { + and: [AuthRule] + or: [AuthRule] + not: AuthRule + rule: String +} + +"""BuildTool contains data from build tool events.""" +type BuildTool { + """id is randomly assigned""" + id: String! + + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String! + + """tool is jenkins etc""" + tool: String! + + """buildName is the name of the job/pipeline/action""" + buildName: String! + buildUrl: String! 
+ artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String! + + """artifactTag would be the tag of the artifact""" + artifactTag: String! + + """digest is the sha of the artifact""" + digest: String! + + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + + """artifactNode links a BuildTool node to an artifact""" + artifactNode(filter: ArtifactFilter): Artifact + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + + """sourceCodeTool links a BuildTool node to the source details""" + sourceCodeTool(filter: SourceCodeToolFilter): SourceCodeTool + + """commitMetaData links a BuildTool node to the git commit based details""" + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData!] + createdAt: DateTime! + commitMetaDataAggregate(filter: CommitMetaDataFilter): CommitMetaDataAggregateResult +} + +type BuildToolAggregateResult { + count: Int + idMin: String + idMax: String + buildIdMin: String + buildIdMax: String + toolMin: String + toolMax: String + buildNameMin: String + buildNameMax: String + buildUrlMin: String + buildUrlMax: String + artifactTypeMin: String + artifactTypeMax: String + artifactMin: String + artifactMax: String + artifactTagMin: String + artifactTagMax: String + digestMin: String + digestMax: String + buildDigestMin: String + buildDigestMax: String + buildTimeMin: DateTime + buildTimeMax: DateTime + buildUserMin: String + buildUserMax: String + createdAtMin: DateTime + createdAtMax: DateTime +} + +input BuildToolFilter { + id: StringHashFilter + buildId: StringExactFilter_StringRegExpFilter + tool: StringExactFilter + buildName: StringExactFilter_StringRegExpFilter + buildUrl: StringExactFilter + artifactType: StringExactFilter + artifact: StringExactFilter + artifactTag: StringExactFilter + 
digest: StringExactFilter + buildDigest: StringExactFilter + has: [BuildToolHasFilter] + and: [BuildToolFilter] + or: [BuildToolFilter] + not: BuildToolFilter +} + +enum BuildToolHasFilter { + id + buildId + tool + buildName + buildUrl + artifactType + artifact + artifactTag + digest + buildDigest + artifactNode + buildTime + buildUser + sourceCodeTool + commitMetaData + createdAt +} + +input BuildToolOrder { + asc: BuildToolOrderable + desc: BuildToolOrderable + then: BuildToolOrder +} + +enum BuildToolOrderable { + id + buildId + tool + buildName + buildUrl + artifactType + artifact + artifactTag + digest + buildDigest + buildTime + buildUser + createdAt +} + +input BuildToolPatch { + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String + + """tool is jenkins etc""" + tool: String + + """buildName is the name of the job/pipeline/action""" + buildName: String + buildUrl: String + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String + + """artifactTag would be the tag of the artifact""" + artifactTag: String + + """digest is the sha of the artifact""" + digest: String + + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + artifactNode: ArtifactRef + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + sourceCodeTool: SourceCodeToolRef + commitMetaData: [CommitMetaDataRef!] 
+ createdAt: DateTime +} + +input BuildToolRef { + """id is randomly assigned""" + id: String + + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String + + """tool is jenkins etc""" + tool: String + + """buildName is the name of the job/pipeline/action""" + buildName: String + buildUrl: String + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String + + """artifactTag would be the tag of the artifact""" + artifactTag: String + + """digest is the sha of the artifact""" + digest: String + + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + artifactNode: ArtifactRef + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + sourceCodeTool: SourceCodeToolRef + commitMetaData: [CommitMetaDataRef!] + createdAt: DateTime +} + +""" +CommitMetaData contains the git commit related details of the source repository . +""" +type CommitMetaData { + """id is randomly assigned""" + id: ID! + + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool(filter: BuildToolFilter): BuildTool! +} + +type CommitMetaDataAggregateResult { + count: Int + commitMin: String + commitMax: String + repositoryMin: String + repositoryMax: String + noOfReviewersConfMin: Int + noOfReviewersConfMax: Int + noOfReviewersConfSum: Int + noOfReviewersConfAvg: Float +} + +input CommitMetaDataFilter { + id: [ID!] 
+ has: [CommitMetaDataHasFilter] + and: [CommitMetaDataFilter] + or: [CommitMetaDataFilter] + not: CommitMetaDataFilter +} + +enum CommitMetaDataHasFilter { + commit + repository + commitSign + noOfReviewersConf + reviewerList + approverList + buildTool +} + +input CommitMetaDataOrder { + asc: CommitMetaDataOrderable + desc: CommitMetaDataOrderable + then: CommitMetaDataOrder +} + +enum CommitMetaDataOrderable { + commit + repository + noOfReviewersConf +} + +input CommitMetaDataPatch { + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool: BuildToolRef +} + +input CommitMetaDataRef { + """id is randomly assigned""" + id: ID + + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool: BuildToolRef +} + +type Component { + id: String! + type: String! + name: String! + version: String! + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability!] + artifacts(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData!] 
+ vulnerabilitiesAggregate(filter: VulnerabilityFilter): VulnerabilityAggregateResult + artifactsAggregate(filter: ArtifactScanDataFilter): ArtifactScanDataAggregateResult +} + +type ComponentAggregateResult { + count: Int + idMin: String + idMax: String + typeMin: String + typeMax: String + nameMin: String + nameMax: String + versionMin: String + versionMax: String + purlMin: String + purlMax: String + cpeMin: String + cpeMax: String + scannedAtMin: DateTime + scannedAtMax: DateTime +} + +input ComponentFilter { + id: StringHashFilter + name: StringExactFilter_StringRegExpFilter + version: StringExactFilter_StringRegExpFilter + purl: StringExactFilter + cpe: StringExactFilter + has: [ComponentHasFilter] + and: [ComponentFilter] + or: [ComponentFilter] + not: ComponentFilter +} + +enum ComponentHasFilter { + id + type + name + version + licenses + purl + cpe + scannedAt + vulnerabilities + artifacts +} + +input ComponentOrder { + asc: ComponentOrderable + desc: ComponentOrderable + then: ComponentOrder +} + +enum ComponentOrderable { + id + type + name + version + purl + cpe + scannedAt +} + +input ComponentPatch { + type: String + name: String + version: String + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities: [VulnerabilityRef!] + artifacts: [ArtifactScanDataRef!] +} + +input ComponentRef { + id: String + type: String + name: String + version: String + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities: [VulnerabilityRef!] + artifacts: [ArtifactScanDataRef!] +} + +input ContainsFilter { + point: PointRef + polygon: PolygonRef +} + +type Credentials { + id: ID! + data: String! + integrator(filter: IntegratorFilter): Integrator! +} + +type CredentialsAggregateResult { + count: Int + dataMin: String + dataMax: String +} + +input CredentialsFilter { + id: [ID!] 
+ has: [CredentialsHasFilter] + and: [CredentialsFilter] + or: [CredentialsFilter] + not: CredentialsFilter +} + +enum CredentialsHasFilter { + data + integrator +} + +input CredentialsOrder { + asc: CredentialsOrderable + desc: CredentialsOrderable + then: CredentialsOrder +} + +enum CredentialsOrderable { + data +} + +input CredentialsPatch { + data: String + integrator: IntegratorRef +} + +input CredentialsRef { + id: ID + data: String + integrator: IntegratorRef +} + +input CustomHTTP { + url: String! + method: HTTPMethod! + body: String + graphql: String + mode: Mode + forwardHeaders: [String!] + secretHeaders: [String!] + introspectionHeaders: [String!] + skipIntrospection: Boolean +} + +type CWE { + id: String! + name: String! + description: String +} + +type CWEAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + descriptionMin: String + descriptionMax: String +} + +input CWEFilter { + id: StringHashFilter + has: [CWEHasFilter] + and: [CWEFilter] + or: [CWEFilter] + not: CWEFilter +} + +enum CWEHasFilter { + id + name + description +} + +input CWEOrder { + asc: CWEOrderable + desc: CWEOrderable + then: CWEOrder +} + +enum CWEOrderable { + id + name + description +} + +input CWEPatch { + name: String + description: String +} + +input CWERef { + id: String + name: String + description: String +} + +""" +The DateTime scalar type represents date and time as a string in RFC3339 format. +For example: "1985-04-12T23:20:50.52Z" represents 20 mins 50.52 secs after the 23rd hour of Apr 12th 1985 in UTC. +""" +scalar DateTime + +input DateTimeFilter { + eq: DateTime + in: [DateTime] + le: DateTime + lt: DateTime + ge: DateTime + gt: DateTime + between: DateTimeRange +} + +input DateTimeRange { + min: DateTime! + max: DateTime! 
+} + +type DeleteApplicationDeploymentPayload { + applicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + msg: String + numUids: Int +} + +type DeleteApplicationDeploymentRiskPayload { + applicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + msg: String + numUids: Int +} + +type DeleteApplicationEnvironmentPayload { + applicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + msg: String + numUids: Int +} + +type DeleteApplicationPayload { + application(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + msg: String + numUids: Int +} + +type DeleteApplicationRiskStatusPayload { + applicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + msg: String + numUids: Int +} + +type DeleteArtifactPayload { + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + msg: String + numUids: Int +} + +type DeleteArtifactScanDataPayload { + artifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + msg: String + numUids: Int +} + +type DeleteBuildToolPayload { + buildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + msg: String + numUids: Int +} + +type DeleteCommitMetaDataPayload { + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + msg: String + numUids: Int +} + +type DeleteComponentPayload { + component(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + msg: String + numUids: Int +} + +type DeleteCredentialsPayload { + 
credentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + msg: String + numUids: Int +} + +type DeleteCWEPayload { + cWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + msg: String + numUids: Int +} + +type DeleteDeploymentTargetPayload { + deploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + msg: String + numUids: Int +} + +type DeleteEnvironmentPayload { + environment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + msg: String + numUids: Int +} + +type DeleteFeatureModePayload { + featureMode(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode] + msg: String + numUids: Int +} + +type DeleteIntegratorPayload { + integrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + msg: String + numUids: Int +} + +type DeleteKeyValuePayload { + keyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + msg: String + numUids: Int +} + +type DeleteOrganizationPayload { + organization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + msg: String + numUids: Int +} + +type DeletePolicyDefinitionPayload { + policyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + msg: String + numUids: Int +} + +type DeletePolicyEnforcementPayload { + policyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + msg: String + numUids: Int +} + +type DeleteRBACPayload { + rBAC(filter: RBACFilter, first: Int, offset: Int): [RBAC] + msg: String + numUids: Int +} + +type DeleteRolePayload { + role(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + msg: String + numUids: Int +} + +type 
DeleteRunHistoryPayload { + runHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + msg: String + numUids: Int +} + +type DeleteSchemaVersionPayload { + schemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + msg: String + numUids: Int +} + +type DeleteSecurityIssuePayload { + securityIssue(filter: SecurityIssueFilter, order: SecurityIssueOrder, first: Int, offset: Int): [SecurityIssue] + msg: String + numUids: Int +} + +type DeleteSourceCodeToolPayload { + sourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + msg: String + numUids: Int +} + +type DeleteTagPayload { + tag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + msg: String + numUids: Int +} + +type DeleteTeamPayload { + team(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + msg: String + numUids: Int +} + +type DeleteToolsUsedPayload { + toolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + msg: String + numUids: Int +} + +type DeleteVulnerabilityPayload { + vulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + msg: String + numUids: Int +} + +"""DeploymentStage is an enum denoting the stage of the deployment. 
""" +enum DeploymentStage { + """deployment is discovered from the events""" + discovered + + """scanning is under process""" + scanning + + """ + deployment is known to have passed the deployment firewall and the deployment (i.e. the artifact) is live + """ + current + + """ + deployment becomes a past deployment because another fresh deployment has happened + """ + previous + + """deployment is blocked by the firewall""" + blocked +} + +input DeploymentStage_exact { + eq: DeploymentStage + in: [DeploymentStage] + le: DeploymentStage + lt: DeploymentStage + ge: DeploymentStage + gt: DeploymentStage + between: DeploymentStage +} + +""" +DeploymentTarget describes a single place that things can be deployed into, +such as an AWS account or a Kubernetes cluster. +""" +type DeploymentTarget { + """id is randomly assigned""" + id: String! + name: String! + + """this would be the ip/server address of the target environment""" + ip: String! + account: String + + """this would be something like aws, gcp etc""" + targetType: String + + """this would be something like us-east-1 etc""" + region: String + kubescapeServiceConnected: String + isFirewall: Boolean + organization(filter: OrganizationFilter): Organization! + defaultEnvironment(filter: EnvironmentFilter): Environment! 
+} + +type DeploymentTargetAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + ipMin: String + ipMax: String + accountMin: String + accountMax: String + targetTypeMin: String + targetTypeMax: String + regionMin: String + regionMax: String + kubescapeServiceConnectedMin: String + kubescapeServiceConnectedMax: String +} + +input DeploymentTargetFilter { + id: StringHashFilter + name: StringExactFilter_StringRegExpFilter + ip: StringExactFilter + has: [DeploymentTargetHasFilter] + and: [DeploymentTargetFilter] + or: [DeploymentTargetFilter] + not: DeploymentTargetFilter +} + +enum DeploymentTargetHasFilter { + id + name + ip + account + targetType + region + kubescapeServiceConnected + isFirewall + organization + defaultEnvironment +} + +input DeploymentTargetOrder { + asc: DeploymentTargetOrderable + desc: DeploymentTargetOrderable + then: DeploymentTargetOrder +} + +enum DeploymentTargetOrderable { + id + name + ip + account + targetType + region + kubescapeServiceConnected +} + +input DeploymentTargetPatch { + name: String + + """this would be the ip/server address of the target environment""" + ip: String + account: String + + """this would be something like aws, gcp etc""" + targetType: String + + """this would be something like us-east-1 etc""" + region: String + kubescapeServiceConnected: String + isFirewall: Boolean + organization: OrganizationRef + defaultEnvironment: EnvironmentRef +} + +input DeploymentTargetRef { + """id is randomly assigned""" + id: String + name: String + + """this would be the ip/server address of the target environment""" + ip: String + account: String + + """this would be something like aws, gcp etc""" + targetType: String + + """this would be something like us-east-1 etc""" + region: String + kubescapeServiceConnected: String + isFirewall: Boolean + organization: OrganizationRef + defaultEnvironment: EnvironmentRef +} + +enum DgraphIndex { + int + int64 + float + bool + hash + exact + 
term + fulltext + trigram + regexp + year + month + day + hour + geo +} + +"""Environment can be things like dev, prod, staging etc.""" +type Environment { + id: String! + organization(filter: OrganizationFilter): Organization! + purpose: String! +} + +type EnvironmentAggregateResult { + count: Int + idMin: String + idMax: String + purposeMin: String + purposeMax: String +} + +input EnvironmentFilter { + id: StringHashFilter + purpose: StringExactFilter + has: [EnvironmentHasFilter] + and: [EnvironmentFilter] + or: [EnvironmentFilter] + not: EnvironmentFilter +} + +enum EnvironmentHasFilter { + id + organization + purpose +} + +input EnvironmentOrder { + asc: EnvironmentOrderable + desc: EnvironmentOrderable + then: EnvironmentOrder +} + +enum EnvironmentOrderable { + id + purpose +} + +input EnvironmentPatch { + organization: OrganizationRef + purpose: String +} + +input EnvironmentRef { + id: String + organization: OrganizationRef + purpose: String +} + +type FeatureMode { + id: String! + organization(filter: OrganizationFilter): Organization! + scan: String! + type: String! + enabled: Boolean! + category: String! + createdAt: DateTime! + updatedAt: DateTime! 
+} + +type FeatureModeAggregateResult { + count: Int + idMin: String + idMax: String + scanMin: String + scanMax: String + typeMin: String + typeMax: String + categoryMin: String + categoryMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input FeatureModeFilter { + id: StringHashFilter + scan: StringExactFilter + type: StringExactFilter + category: StringExactFilter + has: [FeatureModeHasFilter] + and: [FeatureModeFilter] + or: [FeatureModeFilter] + not: FeatureModeFilter +} + +enum FeatureModeHasFilter { + id + organization + scan + type + enabled + category + createdAt + updatedAt +} + +input FeatureModeOrder { + asc: FeatureModeOrderable + desc: FeatureModeOrderable + then: FeatureModeOrder +} + +enum FeatureModeOrderable { + id + scan + type + category + createdAt + updatedAt +} + +input FeatureModePatch { + organization: OrganizationRef + scan: String + type: String + enabled: Boolean + category: String + createdAt: DateTime + updatedAt: DateTime +} + +input FeatureModeRef { + id: String + organization: OrganizationRef + scan: String + type: String + enabled: Boolean + category: String + createdAt: DateTime + updatedAt: DateTime +} + +input FloatFilter { + eq: Float + in: [Float] + le: Float + lt: Float + ge: Float + gt: Float + between: FloatRange +} + +input FloatRange { + min: Float! + max: Float! +} + +input GenerateMutationParams { + add: Boolean + update: Boolean + delete: Boolean +} + +input GenerateQueryParams { + get: Boolean + query: Boolean + password: Boolean + aggregate: Boolean +} + +enum HTTPMethod { + GET + POST + PUT + PATCH + DELETE +} + +""" +The Int64 scalar type represents a signed 64‐bit numeric non‐fractional value. +Int64 can represent values in range [-(2^63),(2^63 - 1)]. +""" +scalar Int64 + +input Int64Filter { + eq: Int64 + in: [Int64] + le: Int64 + lt: Int64 + ge: Int64 + gt: Int64 + between: Int64Range +} + +input Int64Range { + min: Int64! + max: Int64! 
+} + +type Integrator { + id: String! + organization(filter: OrganizationFilter): Organization! + name: String! + type: String! + category: String! + credentials(filter: CredentialsFilter): Credentials! + createdAt: DateTime! + updatedAt: DateTime! +} + +type IntegratorAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + typeMin: String + typeMax: String + categoryMin: String + categoryMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input IntegratorFilter { + id: StringHashFilter + name: StringExactFilter + type: StringExactFilter + category: StringExactFilter + has: [IntegratorHasFilter] + and: [IntegratorFilter] + or: [IntegratorFilter] + not: IntegratorFilter +} + +enum IntegratorHasFilter { + id + organization + name + type + category + credentials + createdAt + updatedAt +} + +input IntegratorOrder { + asc: IntegratorOrderable + desc: IntegratorOrderable + then: IntegratorOrder +} + +enum IntegratorOrderable { + id + name + type + category + createdAt + updatedAt +} + +input IntegratorPatch { + organization: OrganizationRef + name: String + type: String + category: String + credentials: CredentialsRef + createdAt: DateTime + updatedAt: DateTime +} + +input IntegratorRef { + id: String + organization: OrganizationRef + name: String + type: String + category: String + credentials: CredentialsRef + createdAt: DateTime + updatedAt: DateTime +} + +input IntersectsFilter { + polygon: PolygonRef + multiPolygon: MultiPolygonRef +} + +input IntFilter { + eq: Int + in: [Int] + le: Int + lt: Int + ge: Int + gt: Int + between: IntRange +} + +input IntRange { + min: Int! + max: Int! +} + +""" +KeyValue is a generic key/value pair, used as an attribute list or similar. +""" +type KeyValue { + id: String! + name: String! + value: String! 
+} + +type KeyValueAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + valueMin: String + valueMax: String +} + +input KeyValueFilter { + id: StringHashFilter + has: [KeyValueHasFilter] + and: [KeyValueFilter] + or: [KeyValueFilter] + not: KeyValueFilter +} + +enum KeyValueHasFilter { + id + name + value +} + +input KeyValueOrder { + asc: KeyValueOrderable + desc: KeyValueOrderable + then: KeyValueOrder +} + +enum KeyValueOrderable { + id + name + value +} + +input KeyValuePatch { + name: String + value: String +} + +input KeyValueRef { + id: String + name: String + value: String +} + +enum Mode { + BATCH + SINGLE +} + +type MultiPolygon { + polygons: [Polygon!]! +} + +input MultiPolygonRef { + polygons: [PolygonRef!]! +} + +type Mutation { + addSchemaVersion(input: [AddSchemaVersionInput!]!): AddSchemaVersionPayload + updateSchemaVersion(input: UpdateSchemaVersionInput!): UpdateSchemaVersionPayload + deleteSchemaVersion(filter: SchemaVersionFilter!): DeleteSchemaVersionPayload + updateRBAC(input: UpdateRBACInput!): UpdateRBACPayload + deleteRBAC(filter: RBACFilter!): DeleteRBACPayload + addRole(input: [AddRoleInput!]!, upsert: Boolean): AddRolePayload + updateRole(input: UpdateRoleInput!): UpdateRolePayload + deleteRole(filter: RoleFilter!): DeleteRolePayload + addKeyValue(input: [AddKeyValueInput!]!, upsert: Boolean): AddKeyValuePayload + updateKeyValue(input: UpdateKeyValueInput!): UpdateKeyValuePayload + deleteKeyValue(filter: KeyValueFilter!): DeleteKeyValuePayload + addOrganization(input: [AddOrganizationInput!]!, upsert: Boolean): AddOrganizationPayload + updateOrganization(input: UpdateOrganizationInput!): UpdateOrganizationPayload + deleteOrganization(filter: OrganizationFilter!): DeleteOrganizationPayload + addEnvironment(input: [AddEnvironmentInput!]!, upsert: Boolean): AddEnvironmentPayload + updateEnvironment(input: UpdateEnvironmentInput!): UpdateEnvironmentPayload + deleteEnvironment(filter: 
EnvironmentFilter!): DeleteEnvironmentPayload + addDeploymentTarget(input: [AddDeploymentTargetInput!]!, upsert: Boolean): AddDeploymentTargetPayload + updateDeploymentTarget(input: UpdateDeploymentTargetInput!): UpdateDeploymentTargetPayload + deleteDeploymentTarget(filter: DeploymentTargetFilter!): DeleteDeploymentTargetPayload + addTeam(input: [AddTeamInput!]!, upsert: Boolean): AddTeamPayload + updateTeam(input: UpdateTeamInput!): UpdateTeamPayload + deleteTeam(filter: TeamFilter!): DeleteTeamPayload + addApplication(input: [AddApplicationInput!]!, upsert: Boolean): AddApplicationPayload + updateApplication(input: UpdateApplicationInput!): UpdateApplicationPayload + deleteApplication(filter: ApplicationFilter!): DeleteApplicationPayload + addApplicationEnvironment(input: [AddApplicationEnvironmentInput!]!, upsert: Boolean): AddApplicationEnvironmentPayload + updateApplicationEnvironment(input: UpdateApplicationEnvironmentInput!): UpdateApplicationEnvironmentPayload + deleteApplicationEnvironment(filter: ApplicationEnvironmentFilter!): DeleteApplicationEnvironmentPayload + addApplicationRiskStatus(input: [AddApplicationRiskStatusInput!]!): AddApplicationRiskStatusPayload + updateApplicationRiskStatus(input: UpdateApplicationRiskStatusInput!): UpdateApplicationRiskStatusPayload + deleteApplicationRiskStatus(filter: ApplicationRiskStatusFilter!): DeleteApplicationRiskStatusPayload + addApplicationDeployment(input: [AddApplicationDeploymentInput!]!, upsert: Boolean): AddApplicationDeploymentPayload + updateApplicationDeployment(input: UpdateApplicationDeploymentInput!): UpdateApplicationDeploymentPayload + deleteApplicationDeployment(filter: ApplicationDeploymentFilter!): DeleteApplicationDeploymentPayload + addToolsUsed(input: [AddToolsUsedInput!]!): AddToolsUsedPayload + updateToolsUsed(input: UpdateToolsUsedInput!): UpdateToolsUsedPayload + deleteToolsUsed(filter: ToolsUsedFilter!): DeleteToolsUsedPayload + addApplicationDeploymentRisk(input: 
[AddApplicationDeploymentRiskInput!]!): AddApplicationDeploymentRiskPayload + updateApplicationDeploymentRisk(input: UpdateApplicationDeploymentRiskInput!): UpdateApplicationDeploymentRiskPayload + deleteApplicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter!): DeleteApplicationDeploymentRiskPayload + addIntegrator(input: [AddIntegratorInput!]!, upsert: Boolean): AddIntegratorPayload + updateIntegrator(input: UpdateIntegratorInput!): UpdateIntegratorPayload + deleteIntegrator(filter: IntegratorFilter!): DeleteIntegratorPayload + addCredentials(input: [AddCredentialsInput!]!): AddCredentialsPayload + updateCredentials(input: UpdateCredentialsInput!): UpdateCredentialsPayload + deleteCredentials(filter: CredentialsFilter!): DeleteCredentialsPayload + addFeatureMode(input: [AddFeatureModeInput!]!, upsert: Boolean): AddFeatureModePayload + updateFeatureMode(input: UpdateFeatureModeInput!): UpdateFeatureModePayload + deleteFeatureMode(filter: FeatureModeFilter!): DeleteFeatureModePayload + addTag(input: [AddTagInput!]!, upsert: Boolean): AddTagPayload + updateTag(input: UpdateTagInput!): UpdateTagPayload + deleteTag(filter: TagFilter!): DeleteTagPayload + addPolicyDefinition(input: [AddPolicyDefinitionInput!]!, upsert: Boolean): AddPolicyDefinitionPayload + updatePolicyDefinition(input: UpdatePolicyDefinitionInput!): UpdatePolicyDefinitionPayload + deletePolicyDefinition(filter: PolicyDefinitionFilter!): DeletePolicyDefinitionPayload + addPolicyEnforcement(input: [AddPolicyEnforcementInput!]!): AddPolicyEnforcementPayload + updatePolicyEnforcement(input: UpdatePolicyEnforcementInput!): UpdatePolicyEnforcementPayload + deletePolicyEnforcement(filter: PolicyEnforcementFilter!): DeletePolicyEnforcementPayload + addRunHistory(input: [AddRunHistoryInput!]!): AddRunHistoryPayload + updateRunHistory(input: UpdateRunHistoryInput!): UpdateRunHistoryPayload + deleteRunHistory(filter: RunHistoryFilter!): DeleteRunHistoryPayload + addSecurityIssue(input: 
[AddSecurityIssueInput!]!): AddSecurityIssuePayload + updateSecurityIssue(input: UpdateSecurityIssueInput!): UpdateSecurityIssuePayload + deleteSecurityIssue(filter: SecurityIssueFilter!): DeleteSecurityIssuePayload + addBuildTool(input: [AddBuildToolInput!]!, upsert: Boolean): AddBuildToolPayload + updateBuildTool(input: UpdateBuildToolInput!): UpdateBuildToolPayload + deleteBuildTool(filter: BuildToolFilter!): DeleteBuildToolPayload + addSourceCodeTool(input: [AddSourceCodeToolInput!]!, upsert: Boolean): AddSourceCodeToolPayload + updateSourceCodeTool(input: UpdateSourceCodeToolInput!): UpdateSourceCodeToolPayload + deleteSourceCodeTool(filter: SourceCodeToolFilter!): DeleteSourceCodeToolPayload + addCommitMetaData(input: [AddCommitMetaDataInput!]!): AddCommitMetaDataPayload + updateCommitMetaData(input: UpdateCommitMetaDataInput!): UpdateCommitMetaDataPayload + deleteCommitMetaData(filter: CommitMetaDataFilter!): DeleteCommitMetaDataPayload + addArtifact(input: [AddArtifactInput!]!, upsert: Boolean): AddArtifactPayload + updateArtifact(input: UpdateArtifactInput!): UpdateArtifactPayload + deleteArtifact(filter: ArtifactFilter!): DeleteArtifactPayload + addArtifactScanData(input: [AddArtifactScanDataInput!]!, upsert: Boolean): AddArtifactScanDataPayload + updateArtifactScanData(input: UpdateArtifactScanDataInput!): UpdateArtifactScanDataPayload + deleteArtifactScanData(filter: ArtifactScanDataFilter!): DeleteArtifactScanDataPayload + addComponent(input: [AddComponentInput!]!, upsert: Boolean): AddComponentPayload + updateComponent(input: UpdateComponentInput!): UpdateComponentPayload + deleteComponent(filter: ComponentFilter!): DeleteComponentPayload + addVulnerability(input: [AddVulnerabilityInput!]!, upsert: Boolean): AddVulnerabilityPayload + updateVulnerability(input: UpdateVulnerabilityInput!): UpdateVulnerabilityPayload + deleteVulnerability(filter: VulnerabilityFilter!): DeleteVulnerabilityPayload + addCWE(input: [AddCWEInput!]!, upsert: Boolean): 
AddCWEPayload + updateCWE(input: UpdateCWEInput!): UpdateCWEPayload + deleteCWE(filter: CWEFilter!): DeleteCWEPayload +} + +input NearFilter { + distance: Float! + coordinate: PointRef! +} + +type Organization implements RBAC { + """id is randomly assigned""" + id: String! + name: String! + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + teams(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team!] + environments(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget!] + policies(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition!] + policyEnforcements(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] + integrators(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator!] + featureModes(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode!] 
+ rolesAggregate(filter: RoleFilter): RoleAggregateResult + teamsAggregate(filter: TeamFilter): TeamAggregateResult + environmentsAggregate(filter: DeploymentTargetFilter): DeploymentTargetAggregateResult + policiesAggregate(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + policyEnforcementsAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult + integratorsAggregate(filter: IntegratorFilter): IntegratorAggregateResult + featureModesAggregate(filter: FeatureModeFilter): FeatureModeAggregateResult +} + +type OrganizationAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String +} + +input OrganizationFilter { + id: StringHashFilter + name: StringExactFilter + has: [OrganizationHasFilter] + and: [OrganizationFilter] + or: [OrganizationFilter] + not: OrganizationFilter +} + +enum OrganizationHasFilter { + id + name + roles + teams + environments + policies + policyEnforcements + integrators + featureModes +} + +input OrganizationOrder { + asc: OrganizationOrderable + desc: OrganizationOrderable + then: OrganizationOrder +} + +enum OrganizationOrderable { + id + name +} + +input OrganizationPatch { + name: String + roles: [RoleRef!] + teams: [TeamRef!] + environments: [DeploymentTargetRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + integrators: [IntegratorRef!] + featureModes: [FeatureModeRef!] +} + +input OrganizationRef { + """id is randomly assigned""" + id: String + name: String + roles: [RoleRef!] + teams: [TeamRef!] + environments: [DeploymentTargetRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + integrators: [IntegratorRef!] + featureModes: [FeatureModeRef!] +} + +type Point { + longitude: Float! + latitude: Float! +} + +input PointGeoFilter { + near: NearFilter + within: WithinFilter +} + +type PointList { + points: [Point!]! +} + +input PointListRef { + points: [PointRef!]! 
+} + +input PointRef { + longitude: Float! + latitude: Float! +} + +type PolicyDefinition { + id: String! + ownerOrg(filter: OrganizationFilter): Organization! + ownerTeam(filter: TeamFilter): Team + ownerApplication(filter: ApplicationFilter): Application + createdAt: DateTime! + updatedAt: DateTime! + policyName: String! + category: String! + stage: String! + description: String! + scheduledPolicy: Boolean! + script: String! + variables: String + conditionName: String + suggestion: String +} + +type PolicyDefinitionAggregateResult { + count: Int + idMin: String + idMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime + policyNameMin: String + policyNameMax: String + categoryMin: String + categoryMax: String + stageMin: String + stageMax: String + descriptionMin: String + descriptionMax: String + scriptMin: String + scriptMax: String + variablesMin: String + variablesMax: String + conditionNameMin: String + conditionNameMax: String + suggestionMin: String + suggestionMax: String +} + +input PolicyDefinitionFilter { + id: StringHashFilter + policyName: StringExactFilter + category: StringExactFilter + stage: StringExactFilter + description: StringExactFilter + scheduledPolicy: Boolean + script: StringExactFilter + variables: StringExactFilter + conditionName: StringExactFilter + suggestion: StringExactFilter + has: [PolicyDefinitionHasFilter] + and: [PolicyDefinitionFilter] + or: [PolicyDefinitionFilter] + not: PolicyDefinitionFilter +} + +enum PolicyDefinitionHasFilter { + id + ownerOrg + ownerTeam + ownerApplication + createdAt + updatedAt + policyName + category + stage + description + scheduledPolicy + script + variables + conditionName + suggestion +} + +input PolicyDefinitionOrder { + asc: PolicyDefinitionOrderable + desc: PolicyDefinitionOrderable + then: PolicyDefinitionOrder +} + +enum PolicyDefinitionOrderable { + id + createdAt + updatedAt + policyName + category + stage + description + script + 
variables + conditionName + suggestion +} + +input PolicyDefinitionPatch { + ownerOrg: OrganizationRef + ownerTeam: TeamRef + ownerApplication: ApplicationRef + createdAt: DateTime + updatedAt: DateTime + policyName: String + category: String + stage: String + description: String + scheduledPolicy: Boolean + script: String + variables: String + conditionName: String + suggestion: String +} + +input PolicyDefinitionRef { + id: String + ownerOrg: OrganizationRef + ownerTeam: TeamRef + ownerApplication: ApplicationRef + createdAt: DateTime + updatedAt: DateTime + policyName: String + category: String + stage: String + description: String + scheduledPolicy: Boolean + script: String + variables: String + conditionName: String + suggestion: String +} + +type PolicyEnforcement { + id: ID! + policy(filter: PolicyDefinitionFilter): PolicyDefinition! + enforcedOrg(filter: OrganizationFilter): Organization + enforcedTeam(filter: TeamFilter): Team + enforcedApplication(filter: ApplicationFilter): Application + status: Boolean! + forceApply: Boolean + severity: Severity! + datasourceTool: String! + action: String! + conditionValue: String + environments(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment!] + tags(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag!] + createdAt: DateTime! + updatedAt: DateTime! + environmentsAggregate(filter: EnvironmentFilter): EnvironmentAggregateResult + tagsAggregate(filter: TagFilter): TagAggregateResult +} + +type PolicyEnforcementAggregateResult { + count: Int + datasourceToolMin: String + datasourceToolMax: String + actionMin: String + actionMax: String + conditionValueMin: String + conditionValueMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input PolicyEnforcementFilter { + id: [ID!] 
+ status: Boolean + forceApply: Boolean + datasourceTool: StringExactFilter + action: StringExactFilter + conditionValue: StringExactFilter + has: [PolicyEnforcementHasFilter] + and: [PolicyEnforcementFilter] + or: [PolicyEnforcementFilter] + not: PolicyEnforcementFilter +} + +enum PolicyEnforcementHasFilter { + policy + enforcedOrg + enforcedTeam + enforcedApplication + status + forceApply + severity + datasourceTool + action + conditionValue + environments + tags + createdAt + updatedAt +} + +input PolicyEnforcementOrder { + asc: PolicyEnforcementOrderable + desc: PolicyEnforcementOrderable + then: PolicyEnforcementOrder +} + +enum PolicyEnforcementOrderable { + datasourceTool + action + conditionValue + createdAt + updatedAt +} + +input PolicyEnforcementPatch { + policy: PolicyDefinitionRef + enforcedOrg: OrganizationRef + enforcedTeam: TeamRef + enforcedApplication: ApplicationRef + status: Boolean + forceApply: Boolean + severity: Severity + datasourceTool: String + action: String + conditionValue: String + environments: [EnvironmentRef!] + tags: [TagRef!] + createdAt: DateTime + updatedAt: DateTime +} + +input PolicyEnforcementRef { + id: ID + policy: PolicyDefinitionRef + enforcedOrg: OrganizationRef + enforcedTeam: TeamRef + enforcedApplication: ApplicationRef + status: Boolean + forceApply: Boolean + severity: Severity + datasourceTool: String + action: String + conditionValue: String + environments: [EnvironmentRef!] + tags: [TagRef!] + createdAt: DateTime + updatedAt: DateTime +} + +type Polygon { + coordinates: [PointList!]! +} + +input PolygonGeoFilter { + near: NearFilter + within: WithinFilter + contains: ContainsFilter + intersects: IntersectsFilter +} + +input PolygonRef { + coordinates: [PointListRef!]! 
+} + +type Query { + querySchemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + aggregateSchemaVersion(filter: SchemaVersionFilter): SchemaVersionAggregateResult + queryRBAC(filter: RBACFilter, first: Int, offset: Int): [RBAC] + aggregateRBAC(filter: RBACFilter): RBACAggregateResult + getRole(id: String!): Role + queryRole(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + aggregateRole(filter: RoleFilter): RoleAggregateResult + getKeyValue(id: String!): KeyValue + queryKeyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + aggregateKeyValue(filter: KeyValueFilter): KeyValueAggregateResult + getOrganization(id: String!): Organization + queryOrganization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + aggregateOrganization(filter: OrganizationFilter): OrganizationAggregateResult + getEnvironment(id: String!): Environment + queryEnvironment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + aggregateEnvironment(filter: EnvironmentFilter): EnvironmentAggregateResult + getDeploymentTarget(id: String!): DeploymentTarget + queryDeploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + aggregateDeploymentTarget(filter: DeploymentTargetFilter): DeploymentTargetAggregateResult + getTeam(id: String!): Team + queryTeam(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + aggregateTeam(filter: TeamFilter): TeamAggregateResult + getApplication(id: String!): Application + queryApplication(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + aggregateApplication(filter: ApplicationFilter): ApplicationAggregateResult + getApplicationEnvironment(id: String!): ApplicationEnvironment + queryApplicationEnvironment(filter: 
ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + aggregateApplicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironmentAggregateResult + getApplicationRiskStatus(id: ID!): ApplicationRiskStatus + queryApplicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + aggregateApplicationRiskStatus(filter: ApplicationRiskStatusFilter): ApplicationRiskStatusAggregateResult + getApplicationDeployment(id: String!): ApplicationDeployment + queryApplicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + aggregateApplicationDeployment(filter: ApplicationDeploymentFilter): ApplicationDeploymentAggregateResult + getToolsUsed(id: ID!): ToolsUsed + queryToolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + aggregateToolsUsed(filter: ToolsUsedFilter): ToolsUsedAggregateResult + getApplicationDeploymentRisk(id: ID!): ApplicationDeploymentRisk + queryApplicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + aggregateApplicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter): ApplicationDeploymentRiskAggregateResult + getIntegrator(id: String!): Integrator + queryIntegrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + aggregateIntegrator(filter: IntegratorFilter): IntegratorAggregateResult + getCredentials(id: ID!): Credentials + queryCredentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + aggregateCredentials(filter: CredentialsFilter): CredentialsAggregateResult + getFeatureMode(id: String!): FeatureMode + queryFeatureMode(filter: FeatureModeFilter, order: FeatureModeOrder, 
first: Int, offset: Int): [FeatureMode] + aggregateFeatureMode(filter: FeatureModeFilter): FeatureModeAggregateResult + getTag(id: String!): Tag + queryTag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + aggregateTag(filter: TagFilter): TagAggregateResult + getPolicyDefinition(id: String!): PolicyDefinition + queryPolicyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + aggregatePolicyDefinition(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + getPolicyEnforcement(id: ID!): PolicyEnforcement + queryPolicyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + aggregatePolicyEnforcement(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult + getRunHistory(id: ID!): RunHistory + queryRunHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + aggregateRunHistory(filter: RunHistoryFilter): RunHistoryAggregateResult + getSecurityIssue(id: ID!): SecurityIssue + querySecurityIssue(filter: SecurityIssueFilter, order: SecurityIssueOrder, first: Int, offset: Int): [SecurityIssue] + aggregateSecurityIssue(filter: SecurityIssueFilter): SecurityIssueAggregateResult + getBuildTool(id: String!): BuildTool + queryBuildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + aggregateBuildTool(filter: BuildToolFilter): BuildToolAggregateResult + getSourceCodeTool(id: String!): SourceCodeTool + querySourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + aggregateSourceCodeTool(filter: SourceCodeToolFilter): SourceCodeToolAggregateResult + getCommitMetaData(id: ID!): CommitMetaData + queryCommitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + aggregateCommitMetaData(filter: CommitMetaDataFilter): 
CommitMetaDataAggregateResult + getArtifact(id: String!): Artifact + queryArtifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + aggregateArtifact(filter: ArtifactFilter): ArtifactAggregateResult + getArtifactScanData(id: String!): ArtifactScanData + queryArtifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + aggregateArtifactScanData(filter: ArtifactScanDataFilter): ArtifactScanDataAggregateResult + getComponent(id: String!): Component + queryComponent(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + aggregateComponent(filter: ComponentFilter): ComponentAggregateResult + getVulnerability(id: String!): Vulnerability + queryVulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + aggregateVulnerability(filter: VulnerabilityFilter): VulnerabilityAggregateResult + getCWE(id: String!): CWE + queryCWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + aggregateCWE(filter: CWEFilter): CWEAggregateResult +} + +interface RBAC { + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + rolesAggregate(filter: RoleFilter): RoleAggregateResult +} + +type RBACAggregateResult { + count: Int +} + +input RBACFilter { + has: [RBACHasFilter] + and: [RBACFilter] + or: [RBACFilter] + not: RBACFilter +} + +enum RBACHasFilter { + roles +} + +input RBACPatch { + roles: [RoleRef!] +} + +""" +RiskStatus tells us what risk a current application instance or a deployment is at. 
+""" +enum RiskStatus { + lowrisk + mediumrisk + highrisk + apocalypserisk + scanning +} + +input RiskStatus_exact { + eq: RiskStatus + in: [RiskStatus] + le: RiskStatus + lt: RiskStatus + ge: RiskStatus + gt: RiskStatus + between: RiskStatus +} + +input RiskStatus_exact_StringRegExpFilter { + eq: RiskStatus + in: [RiskStatus] + le: RiskStatus + lt: RiskStatus + ge: RiskStatus + gt: RiskStatus + between: RiskStatus + regexp: String +} + +type Role { + """id is randomly assigned""" + id: String! + + """group should be a URI format that includes a scope or realm""" + group: String! + permission: RolePermission! +} + +type RoleAggregateResult { + count: Int + idMin: String + idMax: String + groupMin: String + groupMax: String +} + +input RoleFilter { + id: StringHashFilter + group: StringHashFilter + permission: RolePermission_hash + has: [RoleHasFilter] + and: [RoleFilter] + or: [RoleFilter] + not: RoleFilter +} + +enum RoleHasFilter { + id + group + permission +} + +input RoleOrder { + asc: RoleOrderable + desc: RoleOrderable + then: RoleOrder +} + +enum RoleOrderable { + id + group +} + +input RolePatch { + """group should be a URI format that includes a scope or realm""" + group: String + permission: RolePermission +} + +enum RolePermission { + admin + write + read +} + +input RolePermission_hash { + eq: RolePermission + in: [RolePermission] +} + +input RoleRef { + """id is randomly assigned""" + id: String + + """group should be a URI format that includes a scope or realm""" + group: String + permission: RolePermission +} + +type RunHistory { + id: ID! + policyId: String! + applicationDeployment(filter: ApplicationDeploymentFilter): ApplicationDeployment + artifactScan(filter: ArtifactScanDataFilter): ArtifactScanData + PolicyName: String! + Stage: String! + Artifact: String! + ArtifactTag: String! + ArtifactSha: String! + ArtifactNameTag: String! + DatasourceTool: String! + CreatedAt: DateTime! + UpdatedAt: DateTime! + DeployedAt: DateTime! 
+ Hash: String + Pass: Boolean! + MetaData: String + FileApi: String + scheduledPolicy: Boolean! + policyEnforcements(filter: PolicyEnforcementFilter): PolicyEnforcement! + securityIssue(filter: SecurityIssueFilter): SecurityIssue +} + +type RunHistoryAggregateResult { + count: Int + policyIdMin: String + policyIdMax: String + PolicyNameMin: String + PolicyNameMax: String + StageMin: String + StageMax: String + ArtifactMin: String + ArtifactMax: String + ArtifactTagMin: String + ArtifactTagMax: String + ArtifactShaMin: String + ArtifactShaMax: String + ArtifactNameTagMin: String + ArtifactNameTagMax: String + DatasourceToolMin: String + DatasourceToolMax: String + CreatedAtMin: DateTime + CreatedAtMax: DateTime + UpdatedAtMin: DateTime + UpdatedAtMax: DateTime + DeployedAtMin: DateTime + DeployedAtMax: DateTime + HashMin: String + HashMax: String + MetaDataMin: String + MetaDataMax: String + FileApiMin: String + FileApiMax: String +} + +input RunHistoryFilter { + id: [ID!] + policyId: StringExactFilter + PolicyName: StringExactFilter + Stage: StringExactFilter + Artifact: StringExactFilter + ArtifactTag: StringExactFilter + ArtifactSha: StringExactFilter + ArtifactNameTag: StringExactFilter_StringRegExpFilter + CreatedAt: DateTimeFilter + UpdatedAt: DateTimeFilter + DeployedAt: DateTimeFilter + Pass: Boolean + scheduledPolicy: Boolean + has: [RunHistoryHasFilter] + and: [RunHistoryFilter] + or: [RunHistoryFilter] + not: RunHistoryFilter +} + +enum RunHistoryHasFilter { + policyId + applicationDeployment + artifactScan + PolicyName + Stage + Artifact + ArtifactTag + ArtifactSha + ArtifactNameTag + DatasourceTool + CreatedAt + UpdatedAt + DeployedAt + Hash + Pass + MetaData + FileApi + scheduledPolicy + policyEnforcements + securityIssue +} + +input RunHistoryOrder { + asc: RunHistoryOrderable + desc: RunHistoryOrderable + then: RunHistoryOrder +} + +enum RunHistoryOrderable { + policyId + PolicyName + Stage + Artifact + ArtifactTag + ArtifactSha + ArtifactNameTag + 
DatasourceTool + CreatedAt + UpdatedAt + DeployedAt + Hash + MetaData + FileApi +} + +input RunHistoryPatch { + policyId: String + applicationDeployment: ApplicationDeploymentRef + artifactScan: ArtifactScanDataRef + PolicyName: String + Stage: String + Artifact: String + ArtifactTag: String + ArtifactSha: String + ArtifactNameTag: String + DatasourceTool: String + CreatedAt: DateTime + UpdatedAt: DateTime + DeployedAt: DateTime + Hash: String + Pass: Boolean + MetaData: String + FileApi: String + scheduledPolicy: Boolean + policyEnforcements: PolicyEnforcementRef + securityIssue: SecurityIssueRef +} + +input RunHistoryRef { + id: ID + policyId: String + applicationDeployment: ApplicationDeploymentRef + artifactScan: ArtifactScanDataRef + PolicyName: String + Stage: String + Artifact: String + ArtifactTag: String + ArtifactSha: String + ArtifactNameTag: String + DatasourceTool: String + CreatedAt: DateTime + UpdatedAt: DateTime + DeployedAt: DateTime + Hash: String + Pass: Boolean + MetaData: String + FileApi: String + scheduledPolicy: Boolean + policyEnforcements: PolicyEnforcementRef + securityIssue: SecurityIssueRef +} + +type SchemaVersion { + version: String! +} + +type SchemaVersionAggregateResult { + count: Int + versionMin: String + versionMax: String +} + +input SchemaVersionFilter { + has: [SchemaVersionHasFilter] + and: [SchemaVersionFilter] + or: [SchemaVersionFilter] + not: SchemaVersionFilter +} + +enum SchemaVersionHasFilter { + version +} + +input SchemaVersionOrder { + asc: SchemaVersionOrderable + desc: SchemaVersionOrderable + then: SchemaVersionOrder +} + +enum SchemaVersionOrderable { + version +} + +input SchemaVersionPatch { + version: String +} + +input SchemaVersionRef { + version: String +} + +type SecurityIssue { + id: ID! + AlertTitle: String + AlertMessage: String + Suggestions: String + Severity: Severity! + CreatedAt: DateTime! + UpdatedAt: DateTime! + Action: String! + JiraUrl: String + Status: String! 
+ Reason: String + Error: String + Affects(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory!] + AffectsAggregate(filter: RunHistoryFilter): RunHistoryAggregateResult +} + +type SecurityIssueAggregateResult { + count: Int + AlertTitleMin: String + AlertTitleMax: String + AlertMessageMin: String + AlertMessageMax: String + SuggestionsMin: String + SuggestionsMax: String + CreatedAtMin: DateTime + CreatedAtMax: DateTime + UpdatedAtMin: DateTime + UpdatedAtMax: DateTime + ActionMin: String + ActionMax: String + JiraUrlMin: String + JiraUrlMax: String + StatusMin: String + StatusMax: String + ReasonMin: String + ReasonMax: String + ErrorMin: String + ErrorMax: String +} + +input SecurityIssueFilter { + id: [ID!] + AlertTitle: StringExactFilter_StringRegExpFilter + AlertMessage: StringExactFilter + Suggestions: StringExactFilter + Severity: Severity_exact + CreatedAt: DateTimeFilter + UpdatedAt: DateTimeFilter + Action: StringExactFilter + Status: StringExactFilter + Reason: StringExactFilter + Error: StringExactFilter + has: [SecurityIssueHasFilter] + and: [SecurityIssueFilter] + or: [SecurityIssueFilter] + not: SecurityIssueFilter +} + +enum SecurityIssueHasFilter { + AlertTitle + AlertMessage + Suggestions + Severity + CreatedAt + UpdatedAt + Action + JiraUrl + Status + Reason + Error + Affects +} + +input SecurityIssueOrder { + asc: SecurityIssueOrderable + desc: SecurityIssueOrderable + then: SecurityIssueOrder +} + +enum SecurityIssueOrderable { + AlertTitle + AlertMessage + Suggestions + CreatedAt + UpdatedAt + Action + JiraUrl + Status + Reason + Error +} + +input SecurityIssuePatch { + AlertTitle: String + AlertMessage: String + Suggestions: String + Severity: Severity + CreatedAt: DateTime + UpdatedAt: DateTime + Action: String + JiraUrl: String + Status: String + Reason: String + Error: String + Affects: [RunHistoryRef!] 
+}
+
+input SecurityIssueRef {
+  id: ID
+  AlertTitle: String
+  AlertMessage: String
+  Suggestions: String
+  Severity: Severity
+  CreatedAt: DateTime
+  UpdatedAt: DateTime
+  Action: String
+  JiraUrl: String
+  Status: String
+  Reason: String
+  Error: String
+  Affects: [RunHistoryRef!]
+}
+
+enum Severity {
+  critical
+  high
+  medium
+  low
+  info
+  none
+  unknown
+}
+
+input Severity_exact {
+  eq: Severity
+  in: [Severity]
+  le: Severity
+  lt: Severity
+  ge: Severity
+  gt: Severity
+  between: Severity
+}
+
+"""
+SourceCodeTool contains the source details about the artifact that was built.
+"""
+type SourceCodeTool {
+  """id is randomly assigned"""
+  id: String!
+  createdAt: DateTime!
+
+  """scm is the scm tool github/gitlab etc"""
+  scm: String!
+
+  """repository is the git remote repository"""
+  repository: String!
+
+  """branch is the git branch on which the artifact was built"""
+  branch: String!
+
+  """headCommit is the checked-out head commit"""
+  headCommit: String
+
+  """
+  diffCommits is a comma separated string of the commits between the previous built artifact and the current
+  """
+  diffCommits: String
+  licenseName: String
+  visibility: String
+  workflowName: String
+
+  """parentRepo is populated in case the git repo is a fork"""
+  parentRepo: String
+  buildTool(filter: BuildToolFilter): BuildTool!
+}
+
+type SourceCodeToolAggregateResult {
+  count: Int
+  idMin: String
+  idMax: String
+  createdAtMin: DateTime
+  createdAtMax: DateTime
+  scmMin: String
+  scmMax: String
+  repositoryMin: String
+  repositoryMax: String
+  branchMin: String
+  branchMax: String
+  headCommitMin: String
+  headCommitMax: String
+  diffCommitsMin: String
+  diffCommitsMax: String
+  licenseNameMin: String
+  licenseNameMax: String
+  visibilityMin: String
+  visibilityMax: String
+  workflowNameMin: String
+  workflowNameMax: String
+  parentRepoMin: String
+  parentRepoMax: String
+}
+
+input SourceCodeToolFilter {
+  id: StringHashFilter
+  repository: StringExactFilter_StringRegExpFilter
+  has: [SourceCodeToolHasFilter]
+  and: [SourceCodeToolFilter]
+  or: [SourceCodeToolFilter]
+  not: SourceCodeToolFilter
+}
+
+enum SourceCodeToolHasFilter {
+  id
+  createdAt
+  scm
+  repository
+  branch
+  headCommit
+  diffCommits
+  licenseName
+  visibility
+  workflowName
+  parentRepo
+  buildTool
+}
+
+input SourceCodeToolOrder {
+  asc: SourceCodeToolOrderable
+  desc: SourceCodeToolOrderable
+  then: SourceCodeToolOrder
+}
+
+enum SourceCodeToolOrderable {
+  id
+  createdAt
+  scm
+  repository
+  branch
+  headCommit
+  diffCommits
+  licenseName
+  visibility
+  workflowName
+  parentRepo
+}
+
+input SourceCodeToolPatch {
+  createdAt: DateTime
+
+  """scm is the scm tool github/gitlab etc"""
+  scm: String
+
+  """repository is the git remote repository"""
+  repository: String
+
+  """branch is the git branch on which the artifact was built"""
+  branch: String
+
+  """headCommit is the checked-out head commit"""
+  headCommit: String
+
+  """
+  diffCommits is a comma separated string of the commits between the previous built artifact and the current
+  """
+  diffCommits: String
+  licenseName: String
+  visibility: String
+  workflowName: String
+
+  """parentRepo is populated in case the git repo is a fork"""
+  parentRepo: String
+  buildTool: BuildToolRef
+}
+
+input SourceCodeToolRef {
+  """id is randomly assigned"""
+  id: String
+
+  createdAt: DateTime
+
+  """scm is the scm tool github/gitlab etc"""
+  scm: String
+
+  """repository is the git remote repository"""
+  repository: String
+
+  """branch is the git branch on which the artifact was built"""
+  branch: String
+
+  """headCommit is the checked-out head commit"""
+  headCommit: String
+
+  """
+  diffCommits is a comma separated string of the commits between the previous built artifact and the current
+  """
+  diffCommits: String
+  licenseName: String
+  visibility: String
+  workflowName: String
+
+  """parentRepo is populated in case the git repo is a fork"""
+  parentRepo: String
+  buildTool: BuildToolRef
+}
+
+input StringExactFilter {
+  eq: String
+  in: [String]
+  le: String
+  lt: String
+  ge: String
+  gt: String
+  between: StringRange
+}
+
+input StringExactFilter_StringRegExpFilter {
+  eq: String
+  in: [String]
+  le: String
+  lt: String
+  ge: String
+  gt: String
+  between: StringRange
+  regexp: String
+}
+
+input StringFullTextFilter {
+  alloftext: String
+  anyoftext: String
+}
+
+input StringHashFilter {
+  eq: String
+  in: [String]
+}
+
+input StringRange {
+  min: String!
+  max: String!
+} + +input StringRegExpFilter { + regexp: String +} + +input StringTermFilter { + allofterms: String + anyofterms: String +} + +type Subscription { + getOrganization(id: String!): Organization + queryOrganization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + aggregateOrganization(filter: OrganizationFilter): OrganizationAggregateResult + getTeam(id: String!): Team + queryTeam(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + aggregateTeam(filter: TeamFilter): TeamAggregateResult + getApplication(id: String!): Application + queryApplication(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + aggregateApplication(filter: ApplicationFilter): ApplicationAggregateResult + getApplicationEnvironment(id: String!): ApplicationEnvironment + queryApplicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + aggregateApplicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironmentAggregateResult +} + +""" +Tag tells us about the tags that are linked to policies and other components. +""" +type Tag { + id: String! + tagName: String! + tagValue: String! + tagDescription: String + createdBy: String + createdAt: DateTime! + updatedAt: DateTime! + policies(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] 
+ policiesAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult +} + +type TagAggregateResult { + count: Int + idMin: String + idMax: String + tagNameMin: String + tagNameMax: String + tagValueMin: String + tagValueMax: String + tagDescriptionMin: String + tagDescriptionMax: String + createdByMin: String + createdByMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input TagFilter { + id: StringExactFilter + tagName: StringExactFilter + tagValue: StringExactFilter + createdBy: StringExactFilter + has: [TagHasFilter] + and: [TagFilter] + or: [TagFilter] + not: TagFilter +} + +enum TagHasFilter { + id + tagName + tagValue + tagDescription + createdBy + createdAt + updatedAt + policies +} + +input TagOrder { + asc: TagOrderable + desc: TagOrderable + then: TagOrder +} + +enum TagOrderable { + id + tagName + tagValue + tagDescription + createdBy + createdAt + updatedAt +} + +input TagPatch { + tagName: String + tagValue: String + tagDescription: String + createdBy: String + createdAt: DateTime + updatedAt: DateTime + policies: [PolicyEnforcementRef!] +} + +input TagRef { + id: String + tagName: String + tagValue: String + tagDescription: String + createdBy: String + createdAt: DateTime + updatedAt: DateTime + policies: [PolicyEnforcementRef!] +} + +type Team implements RBAC { + """id is randomly assigned""" + id: String! + name: String! + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + organization(filter: OrganizationFilter): Organization! + applications(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application!] + labels(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue!] + policies(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition!] 
+ policyEnforcements(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] + rolesAggregate(filter: RoleFilter): RoleAggregateResult + applicationsAggregate(filter: ApplicationFilter): ApplicationAggregateResult + labelsAggregate(filter: KeyValueFilter): KeyValueAggregateResult + policiesAggregate(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + policyEnforcementsAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult +} + +type TeamAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String +} + +input TeamFilter { + id: StringHashFilter + name: StringExactFilter + has: [TeamHasFilter] + and: [TeamFilter] + or: [TeamFilter] + not: TeamFilter +} + +enum TeamHasFilter { + id + name + roles + organization + applications + labels + policies + policyEnforcements +} + +input TeamOrder { + asc: TeamOrderable + desc: TeamOrderable + then: TeamOrder +} + +enum TeamOrderable { + id + name +} + +input TeamPatch { + name: String + roles: [RoleRef!] + organization: OrganizationRef + applications: [ApplicationRef!] + labels: [KeyValueRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] +} + +input TeamRef { + """id is randomly assigned""" + id: String + name: String + roles: [RoleRef!] + organization: OrganizationRef + applications: [ApplicationRef!] + labels: [KeyValueRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] +} + +type ToolsUsed { + id: ID! + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +type ToolsUsedAggregateResult { + count: Int + sourceMin: String + sourceMax: String + buildMin: String + buildMax: String + artifactMin: String + artifactMax: String + deployMin: String + deployMax: String + sbomMin: String + sbomMax: String +} + +input ToolsUsedFilter { + id: [ID!] 
+ has: [ToolsUsedHasFilter] + and: [ToolsUsedFilter] + or: [ToolsUsedFilter] + not: ToolsUsedFilter +} + +enum ToolsUsedHasFilter { + source + build + artifact + deploy + sbom + misc +} + +input ToolsUsedOrder { + asc: ToolsUsedOrderable + desc: ToolsUsedOrderable + then: ToolsUsedOrder +} + +enum ToolsUsedOrderable { + source + build + artifact + deploy + sbom +} + +input ToolsUsedPatch { + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +input ToolsUsedRef { + id: ID + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +input UpdateApplicationDeploymentInput { + filter: ApplicationDeploymentFilter! + set: ApplicationDeploymentPatch + remove: ApplicationDeploymentPatch +} + +type UpdateApplicationDeploymentPayload { + applicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + numUids: Int +} + +input UpdateApplicationDeploymentRiskInput { + filter: ApplicationDeploymentRiskFilter! + set: ApplicationDeploymentRiskPatch + remove: ApplicationDeploymentRiskPatch +} + +type UpdateApplicationDeploymentRiskPayload { + applicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + numUids: Int +} + +input UpdateApplicationEnvironmentInput { + filter: ApplicationEnvironmentFilter! + set: ApplicationEnvironmentPatch + remove: ApplicationEnvironmentPatch +} + +type UpdateApplicationEnvironmentPayload { + applicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + numUids: Int +} + +input UpdateApplicationInput { + filter: ApplicationFilter! 
+ set: ApplicationPatch + remove: ApplicationPatch +} + +type UpdateApplicationPayload { + application(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + numUids: Int +} + +input UpdateApplicationRiskStatusInput { + filter: ApplicationRiskStatusFilter! + set: ApplicationRiskStatusPatch + remove: ApplicationRiskStatusPatch +} + +type UpdateApplicationRiskStatusPayload { + applicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + numUids: Int +} + +input UpdateArtifactInput { + filter: ArtifactFilter! + set: ArtifactPatch + remove: ArtifactPatch +} + +type UpdateArtifactPayload { + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + numUids: Int +} + +input UpdateArtifactScanDataInput { + filter: ArtifactScanDataFilter! + set: ArtifactScanDataPatch + remove: ArtifactScanDataPatch +} + +type UpdateArtifactScanDataPayload { + artifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + numUids: Int +} + +input UpdateBuildToolInput { + filter: BuildToolFilter! + set: BuildToolPatch + remove: BuildToolPatch +} + +type UpdateBuildToolPayload { + buildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + numUids: Int +} + +input UpdateCommitMetaDataInput { + filter: CommitMetaDataFilter! + set: CommitMetaDataPatch + remove: CommitMetaDataPatch +} + +type UpdateCommitMetaDataPayload { + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + numUids: Int +} + +input UpdateComponentInput { + filter: ComponentFilter! 
+ set: ComponentPatch + remove: ComponentPatch +} + +type UpdateComponentPayload { + component(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + numUids: Int +} + +input UpdateCredentialsInput { + filter: CredentialsFilter! + set: CredentialsPatch + remove: CredentialsPatch +} + +type UpdateCredentialsPayload { + credentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + numUids: Int +} + +input UpdateCWEInput { + filter: CWEFilter! + set: CWEPatch + remove: CWEPatch +} + +type UpdateCWEPayload { + cWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + numUids: Int +} + +input UpdateDeploymentTargetInput { + filter: DeploymentTargetFilter! + set: DeploymentTargetPatch + remove: DeploymentTargetPatch +} + +type UpdateDeploymentTargetPayload { + deploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + numUids: Int +} + +input UpdateEnvironmentInput { + filter: EnvironmentFilter! + set: EnvironmentPatch + remove: EnvironmentPatch +} + +type UpdateEnvironmentPayload { + environment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + numUids: Int +} + +input UpdateFeatureModeInput { + filter: FeatureModeFilter! + set: FeatureModePatch + remove: FeatureModePatch +} + +type UpdateFeatureModePayload { + featureMode(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode] + numUids: Int +} + +input UpdateIntegratorInput { + filter: IntegratorFilter! + set: IntegratorPatch + remove: IntegratorPatch +} + +type UpdateIntegratorPayload { + integrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + numUids: Int +} + +input UpdateKeyValueInput { + filter: KeyValueFilter! 
+ set: KeyValuePatch + remove: KeyValuePatch +} + +type UpdateKeyValuePayload { + keyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + numUids: Int +} + +input UpdateOrganizationInput { + filter: OrganizationFilter! + set: OrganizationPatch + remove: OrganizationPatch +} + +type UpdateOrganizationPayload { + organization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + numUids: Int +} + +input UpdatePolicyDefinitionInput { + filter: PolicyDefinitionFilter! + set: PolicyDefinitionPatch + remove: PolicyDefinitionPatch +} + +type UpdatePolicyDefinitionPayload { + policyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + numUids: Int +} + +input UpdatePolicyEnforcementInput { + filter: PolicyEnforcementFilter! + set: PolicyEnforcementPatch + remove: PolicyEnforcementPatch +} + +type UpdatePolicyEnforcementPayload { + policyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + numUids: Int +} + +input UpdateRBACInput { + filter: RBACFilter! + set: RBACPatch + remove: RBACPatch +} + +type UpdateRBACPayload { + rBAC(filter: RBACFilter, first: Int, offset: Int): [RBAC] + numUids: Int +} + +input UpdateRoleInput { + filter: RoleFilter! + set: RolePatch + remove: RolePatch +} + +type UpdateRolePayload { + role(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + numUids: Int +} + +input UpdateRunHistoryInput { + filter: RunHistoryFilter! + set: RunHistoryPatch + remove: RunHistoryPatch +} + +type UpdateRunHistoryPayload { + runHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + numUids: Int +} + +input UpdateSchemaVersionInput { + filter: SchemaVersionFilter! 
+ set: SchemaVersionPatch + remove: SchemaVersionPatch +} + +type UpdateSchemaVersionPayload { + schemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + numUids: Int +} + +input UpdateSecurityIssueInput { + filter: SecurityIssueFilter! + set: SecurityIssuePatch + remove: SecurityIssuePatch +} + +type UpdateSecurityIssuePayload { + securityIssue(filter: SecurityIssueFilter, order: SecurityIssueOrder, first: Int, offset: Int): [SecurityIssue] + numUids: Int +} + +input UpdateSourceCodeToolInput { + filter: SourceCodeToolFilter! + set: SourceCodeToolPatch + remove: SourceCodeToolPatch +} + +type UpdateSourceCodeToolPayload { + sourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + numUids: Int +} + +input UpdateTagInput { + filter: TagFilter! + set: TagPatch + remove: TagPatch +} + +type UpdateTagPayload { + tag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + numUids: Int +} + +input UpdateTeamInput { + filter: TeamFilter! + set: TeamPatch + remove: TeamPatch +} + +type UpdateTeamPayload { + team(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + numUids: Int +} + +input UpdateToolsUsedInput { + filter: ToolsUsedFilter! + set: ToolsUsedPatch + remove: ToolsUsedPatch +} + +type UpdateToolsUsedPayload { + toolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + numUids: Int +} + +input UpdateVulnerabilityInput { + filter: VulnerabilityFilter! + set: VulnerabilityPatch + remove: VulnerabilityPatch +} + +type UpdateVulnerabilityPayload { + vulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + numUids: Int +} + +type Vulnerability { + id: String! + parent: String! + ratings: Severity + cwes(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE!] 
+ summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: Float + priority: String + epss: Float + cisa_kev: String + affects(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component!] + cwesAggregate(filter: CWEFilter): CWEAggregateResult + affectsAggregate(filter: ComponentFilter): ComponentAggregateResult +} + +type VulnerabilityAggregateResult { + count: Int + idMin: String + idMax: String + parentMin: String + parentMax: String + summaryMin: String + summaryMax: String + detailMin: String + detailMax: String + recommendationMin: String + recommendationMax: String + publishedMin: DateTime + publishedMax: DateTime + modifiedMin: DateTime + modifiedMax: DateTime + createdAtMin: DateTime + createdAtMax: DateTime + cvssMin: Float + cvssMax: Float + cvssSum: Float + cvssAvg: Float + priorityMin: String + priorityMax: String + epssMin: Float + epssMax: Float + epssSum: Float + epssAvg: Float + cisa_kevMin: String + cisa_kevMax: String +} + +input VulnerabilityFilter { + id: StringHashFilter + parent: StringExactFilter_StringRegExpFilter + ratings: Severity_exact + createdAt: DateTimeFilter + cvss: FloatFilter + priority: StringExactFilter_StringRegExpFilter + epss: FloatFilter + cisa_kev: StringExactFilter_StringRegExpFilter + has: [VulnerabilityHasFilter] + and: [VulnerabilityFilter] + or: [VulnerabilityFilter] + not: VulnerabilityFilter +} + +enum VulnerabilityHasFilter { + id + parent + ratings + cwes + summary + detail + recommendation + published + modified + createdAt + cvss + priority + epss + cisa_kev + affects +} + +input VulnerabilityOrder { + asc: VulnerabilityOrderable + desc: VulnerabilityOrderable + then: VulnerabilityOrder +} + +enum VulnerabilityOrderable { + id + parent + summary + detail + recommendation + published + modified + createdAt + cvss + priority + epss + cisa_kev +} + +input VulnerabilityPatch { + parent: String + ratings: Severity + 
cwes: [CWERef!] + summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: Float + priority: String + epss: Float + cisa_kev: String + affects: [ComponentRef!] +} + +input VulnerabilityRef { + id: String + parent: String + ratings: Severity + cwes: [CWERef!] + summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: Float + priority: String + epss: Float + cisa_kev: String + affects: [ComponentRef!] +} + +input WithinFilter { + polygon: PolygonRef! +} + diff --git a/logger/logging.go b/logger/logging.go new file mode 100644 index 0000000..4ed075d --- /dev/null +++ b/logger/logging.go @@ -0,0 +1,8 @@ +package logger + +import "go.uber.org/zap" + +var ( + Logger *zap.Logger + Sl *zap.SugaredLogger +) diff --git a/main.go b/main.go new file mode 100644 index 0000000..f77c039 --- /dev/null +++ b/main.go @@ -0,0 +1,49 @@ +package main + +import ( + "flag" + "runtime" + "upgradationScript/common" + "upgradationScript/logger" + + _ "github.com/Khan/genqlient/generate" + "github.com/OpsMx/go-app-base/version" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var ( + appName = "upgrade-job" + configFile = flag.String("configFile", "/app/config/"+appName+".yaml", "Configuration file location") + //configFile = flag.String("configFile", "/home/dell/upgradationScript/config.yaml", "Configuration file location") +) + +func main() { + + zapConfig := zap.NewProductionConfig() + zapConfig.Level.SetLevel(zapcore.ErrorLevel) + logger.Logger, _ = zapConfig.Build() + logger.Sl = logger.Logger.Sugar() + + zapConfig.Level.SetLevel(zap.DebugLevel) + logger.Logger, _ = zapConfig.Build() + logger.Sl = logger.Logger.Sugar() + + logger.Sl.Infow("starting", + "appName", appName, + "version", version.VersionString(), + "gitBranch", version.GitBranch(), + "gitHash", version.GitHash(), + "buildType", version.BuildType(), + "os", runtime.GOOS, + 
"arch", runtime.GOARCH, + "cores", runtime.NumCPU(), + ) + + common.LoadConfigurationFile(*configFile) + + if err := common.StartUpgrade(); err != nil { + logger.Sl.Fatal(err.Error()) + } + +} diff --git a/policies/genqlient.yaml b/policies/genqlient.yaml new file mode 100644 index 0000000..998eab6 --- /dev/null +++ b/policies/genqlient.yaml @@ -0,0 +1,17 @@ +schema: schema.graphql +operations: +- queries.graphql +generated: schema-generated.go +package: policyingenstionscript +use_struct_references: true +bindings: + Boolean: + type: "*bool" + DateTime: + type: "*time.Time" + Int64: + type: int64 + Int: + type: "*int" + ID: + type: "*string" diff --git a/policies/policy_ingestion.go b/policies/policy_ingestion.go new file mode 100644 index 0000000..b6ff2f7 --- /dev/null +++ b/policies/policy_ingestion.go @@ -0,0 +1,329 @@ +package policyingenstionscript + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + "upgradationScript/logger" + + "github.com/Khan/genqlient/graphql" +) + +type PolicyDefinitionScript struct { + PolicyId string `json:"policyId,omitempty" yaml:"policyId,omitempty"` + PolicyName string `json:"policyName,omitempty" yaml:"policyName,omitempty"` + Category string `json:"category,omitempty" yaml:"category,omitempty"` + Stage string `json:"stage,omitempty" yaml:"stage,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` + ScheduledPolicy bool `json:"scheduled_policy,omitempty" yaml:"scheduledPolicy,omitempty"` + ScriptId string `json:"scriptId,omitempty" yaml:"scriptId,omitempty"` + Variables string `json:"variables,omitempty" yaml:"variables,omitempty"` + ConditionName string `json:"conditionName,omitempty" yaml:"conditionName,omitempty"` + Suggestion interface{} `json:"suggestion,omitempty" yaml:"suggestion,omitempty"` +} + +type PolicyEnforcementScript struct { + PolicyId string `json:"policyId,omitempty" yaml:"policyId,omitempty"` + Severity string `json:"severity,omitempty" 
yaml:"severity,omitempty"` + Action string `json:"action,omitempty" yaml:"action,omitempty"` + ConditionValue string `json:"conditionValue,omitempty" yaml:"conditionValue,omitempty"` + Status bool `json:"status,omitempty" yaml:"status,omitempty"` + Tags []string `json:"tags,omitempty" yaml:"tags,omitempty"` + DatasourceTool string `json:"datasourceTool,omitempty" yaml:"datasourceTool,omitempty"` +} + +type TagScript struct { + Id string `json:"id,omitempty" yaml:"id,omitempty"` + TagName string `json:"tagName,omitempty" yaml:"tagName,omitempty"` + TagValue string `json:"tagValue,omitempty" yaml:"tagValue,omitempty"` + TagDescription string `json:"tagDescription,omitempty" yaml:"tagDescription,omitempty"` + CreatedBy string `json:"createdBy,omitempty" yaml:"createdBy,omitempty"` +} + +var tagIdInScriptMapIdInDB = make(map[string]string) +var policyDefInScriptMapIdInDB = make(map[string]string) + +func UpgradePolicyAndTagData(graphqlClient graphql.Client, orgId string) error { + + if err := ingestTags(graphqlClient); err != nil { + return fmt.Errorf("upgradePolicyAndTagData: %s", err.Error()) + } + + if err := ingestPolicyDef(graphqlClient, orgId); err != nil { + return fmt.Errorf("upgradePolicyAndTagData: %s", err.Error()) + } + + if err := ingestPolicyEnforcement(graphqlClient, orgId); err != nil { + return fmt.Errorf("upgradePolicyAndTagData: %s", err.Error()) + } + return nil + +} + +func ingestTags(graphqlClient graphql.Client) error { + + logger.Sl.Debugf("---------------------Starting Tags ingestion---------------------") + + for i, tag := range tagPolicy { + + var allTagInputs []*AddTagInput + var tagScript TagScript + if err := json.Unmarshal([]byte(tag), &tagScript); err != nil { + return fmt.Errorf("ingestTags: json.Unmarshal: iteration: %d err: %s", i, err.Error()) + } + + existingTag, err := checkIfTagExists(context.Background(), graphqlClient, tagScript.TagName, tagScript.TagValue, tagScript.CreatedBy) + if err != nil { + return fmt.Errorf("ingestTags: 
checkIfTagExists: iteration: %d err: %s", i, err.Error()) + } + + if existingTag.QueryTag != nil && len(existingTag.QueryTag) > 0 { + + tagIdInScriptMapIdInDB[tagScript.Id] = existingTag.QueryTag[0].Id + + logger.Sl.Debugf("ingestTags: tag already exists skipping ingestion of tag iteration: %d", i) + continue + } + + lastTag, err := getLastTagId(context.Background(), graphqlClient) + if err != nil { + return fmt.Errorf("ingestTags: getLastTagId: iteration: %d err: %s", i, err.Error()) + } + + tagIdInt := *lastTag.AggregateTag.Count + 1 + + currTime := time.Now().UTC() + newTagInput := AddTagInput{ + Id: fmt.Sprintf("%d", tagIdInt), + TagName: tagScript.TagName, + TagValue: tagScript.TagValue, + TagDescription: tagScript.TagDescription, + CreatedBy: tagScript.CreatedBy, + CreatedAt: &currTime, + UpdatedAt: &currTime, + } + + logger.Sl.Debugf("ingestTags: will add new tag %s", newTagInput.TagValue) + + tagIdInScriptMapIdInDB[tagScript.Id] = newTagInput.Id + + allTagInputs = append(allTagInputs, &newTagInput) + + if _, err := addTag(context.Background(), graphqlClient, allTagInputs); err != nil { + return fmt.Errorf("ingestTags: addTags: error: %s", err.Error()) + } + + } + + logger.Sl.Debugf("-------------------Completed Tags ingestion----------------------") + + return nil +} + +func ingestPolicyDef(graphqlClient graphql.Client, orgId string) error { + + logger.Sl.Debugf("---------------Started PolicyDef ingestion-------------------") + + for i, policyDef := range policyDefinition { + + var addPoliciesDef []*AddPolicyDefinitionInput + + var policyDefScript PolicyDefinitionScript + if err := json.Unmarshal([]byte(policyDef), &policyDefScript); err != nil { + return fmt.Errorf("ingestPolicyDef: json.Unmarshal: iteration: %v err: %s", i, err.Error()) + } + + scriptID, _ := strconv.Atoi(policyDefScript.ScriptId) + + checkIfPolicyNameExistsResp, err := checkIfPolicyNameExists(context.Background(), graphqlClient, policyDefScript.PolicyName, orgId) + if err != nil { + 
return fmt.Errorf("ingestPolicyDef: checkIfPolicyNameExists: iteration: %v err: %s", i, err.Error()) + } + + now := time.Now().UTC() + + // update policy flow + if len(checkIfPolicyNameExistsResp.QueryPolicyDefinition) != 0 { + policyDefInScriptMapIdInDB[policyDefScript.PolicyId] = checkIfPolicyNameExistsResp.QueryPolicyDefinition[0].Id + logger.Sl.Debugf("ingestPolicyDef: policyDef PolicyName already exists checking if exact policy is available iteration: %v", i) + + exactExists, err := checkIfExactPolicyDefinitionExists(context.Background(), graphqlClient, policyDefScript.PolicyName, policyDefScript.Category, policyDefScript.Stage, policyDefScript.Description, &policyDefScript.ScheduledPolicy, scriptMap[scriptID], policyDefScript.Variables, policyDefScript.ConditionName, "", orgId) + if err != nil { + return fmt.Errorf("ingestPolicyDef: checkIfExactPolicyDefinitionExists: iteration: %v err: %s", i, err.Error()) + } + + if len(exactExists.QueryPolicyDefinition) != 0 { + logger.Sl.Debugf("ingestPolicyDef: exact policyDef already exists skipping iteration: %v", i) + continue + } + + if _, err := updatePolicyDefinition(context.Background(), graphqlClient, checkIfPolicyNameExistsResp.QueryPolicyDefinition[0].Id, policyDefScript.Category, policyDefScript.Stage, policyDefScript.Description, &policyDefScript.ScheduledPolicy, scriptMap[scriptID], policyDefScript.Variables, policyDefScript.ConditionName, "", &now); err != nil { + return fmt.Errorf("ingestPolicyDef: updatePolicyDefinition: iteration: %v err: %s", i, err.Error()) + } + + logger.Sl.Debugf("ingestPolicyDef: policyDef updated iteration: %v", i) + continue + } + + getLastPolicyIdResp, err := getLastPolicyId(context.Background(), graphqlClient, orgId) + if err != nil { + return fmt.Errorf("ingestPolicyDef: getLastPolicyId: iteration: %v err: %s", i, err.Error()) + } + + policyDefIdInt := *getLastPolicyIdResp.QueryOrganization[0].PoliciesAggregate.Count + 1 + + policy := AddPolicyDefinitionInput{ + Id: 
fmt.Sprintf("%v", policyDefIdInt), + OwnerOrg: &OrganizationRef{ + Id: orgId, + }, + CreatedAt: &now, + UpdatedAt: &now, + PolicyName: policyDefScript.PolicyName, + Category: policyDefScript.Category, + Stage: policyDefScript.Stage, + Description: policyDefScript.Description, + Script: scriptMap[scriptID], + ScheduledPolicy: &policyDefScript.ScheduledPolicy, + Variables: policyDefScript.Variables, + ConditionName: policyDefScript.ConditionName, + } + + policyDefInScriptMapIdInDB[policyDefScript.PolicyId] = policy.Id + + logger.Sl.Debugf("ingestPolicyDef: will add policyDef iteration: %d", i) + + addPoliciesDef = append(addPoliciesDef, &policy) + + if _, err := addPolicyDefinition(context.TODO(), graphqlClient, addPoliciesDef); err != nil { + return fmt.Errorf("ingestPolicyDef: addPolicyDefinition: err: %s", err.Error()) + } + } + + logger.Sl.Debug("----------------Completed PolicyDef ingestion-----------------------") + + return nil +} + +func ingestPolicyEnforcement(graphqlClient graphql.Client, orgId string) error { + + logger.Sl.Debugf("---------------------Starting Policy Enf ingestion---------------------") + + var allPolicyEnf []*AddPolicyEnforcementInput + + for i, enf := range policyEnforcement { + + var policyEnfScript PolicyEnforcementScript + if err := json.Unmarshal([]byte(enf), &policyEnfScript); err != nil { + return fmt.Errorf("ingestPolicyEnforcement: json.Unmarshal: iteration: %v err: %s", i, err.Error()) + } + + checkIfPolicyEnforcementExistsResp, err := checkIfPolicyEnforcementExists(context.Background(), graphqlClient, policyEnfScript.DatasourceTool, policyDefInScriptMapIdInDB[policyEnfScript.PolicyId]) + if err != nil { + return fmt.Errorf("ingestPolicyEnforcement: checkIfPolicyEnforcementExists: iteration: %v err: %s", i, err.Error()) + } + + now := time.Now().UTC() + + if len(checkIfPolicyEnforcementExistsResp.QueryPolicyEnforcement) != 0 { + + for _, eachPolicyEnf := range checkIfPolicyEnforcementExistsResp.QueryPolicyEnforcement { + + if 
eachPolicyEnf.ConditionValue != policyEnfScript.ConditionValue { + if _, err := updatePolicyEnforcement(context.Background(), graphqlClient, policyEnfScript.ConditionValue, eachPolicyEnf.Id, &now); err != nil { + return fmt.Errorf("ingestPolicyEnforcement: updatePolicyEnforcement: policyEnfId: %s iteration: %v err: %s", *eachPolicyEnf.Id, i, err.Error()) + } + } + + for _, eachTagScript := range policyEnfScript.Tags { + + found := false + checkForTagId := tagIdInScriptMapIdInDB[eachTagScript] + + for _, eachTagDb := range eachPolicyEnf.Tags { + + if found { + continue + } + + if eachTagDb.Id == checkForTagId { + found = true + } + } + + if found { + continue + } + + if _, err := assignTagsToPolicy(context.Background(), graphqlClient, eachPolicyEnf.Id, &TagRef{Id: checkForTagId}, &now); err != nil { + return fmt.Errorf("ingestPolicyEnforcement: assignTagsToPolicy: policyEnfId: %s iteration: %v err: %s", *eachPolicyEnf.Id, i, err.Error()) + } + + } + + } + + continue + + } + + policyEnf := AddPolicyEnforcementInput{ + Policy: &PolicyDefinitionRef{ + Id: policyDefInScriptMapIdInDB[policyEnfScript.PolicyId], + }, + EnforcedOrg: &OrganizationRef{ + Id: orgId, + }, + Status: &policyEnfScript.Status, + Severity: MapSeverity(policyEnfScript.Severity), + Action: policyEnfScript.Action, + ConditionValue: policyEnfScript.ConditionValue, + CreatedAt: &now, + UpdatedAt: &now, + DatasourceTool: policyEnfScript.DatasourceTool, + } + + var tags []*TagRef + for _, tagId := range policyEnfScript.Tags { + tags = append(tags, &TagRef{ + Id: tagIdInScriptMapIdInDB[tagId], + }) + } + + policyEnf.Tags = tags + + allPolicyEnf = append(allPolicyEnf, &policyEnf) + } + + if allPolicyEnf != nil { + if _, err := addPolicyEnforcement(context.TODO(), graphqlClient, allPolicyEnf); err != nil { + return fmt.Errorf("ingestPolicyEnforcement: addPolicyEnforcement: err: %s", err.Error()) + } + } + + logger.Sl.Debugf("---------------------Completed Policy Enf ingestion---------------------") + + return 
nil +} + +func MapSeverity(s string) Severity { + switch strings.ToLower(s) { + case "critical": + return SeverityCritical + case "high": + return SeverityHigh + case "medium": + return SeverityMedium + case "low": + return SeverityLow + case "info": + return SeverityInfo + case "none": + return SeverityNone + default: + return SeverityUnknown + } +} diff --git a/policies/queries.graphql b/policies/queries.graphql new file mode 100644 index 0000000..2e03e54 --- /dev/null +++ b/policies/queries.graphql @@ -0,0 +1,169 @@ +query checkIfTagExists( + $tagName: String! + $tagValue: String! + $createdBy: String! +) { + queryTag( + filter: { + tagName: { eq: $tagName } + tagValue: { eq: $tagValue } + createdBy: { eq: $createdBy } + } + ) { + id + } +} + +query getLastTagId { + aggregateTag { + count + } +} + +mutation addTag($tags:[AddTagInput!]!) { + addTag(input: $tags) { + numUids + } +} + +query checkIfPolicyNameExists($policyName: String!, $orgId: String!) { + queryPolicyDefinition(filter: { policyName: { eq: $policyName } }) @cascade { + id + policyName + stage + category + ownerOrg(filter: { id: { eq: $orgId } }) { + id + } + } +} + +query getLastPolicyId($organizationId: String!) { + queryOrganization(filter: { id: { eq: $organizationId } }) { + policiesAggregate { + count + } + } +} + +mutation addPolicyDefinition ($input: [AddPolicyDefinitionInput!]!) { + addPolicyDefinition(input: $input) { + numUids + } +} + +query checkIfExactPolicyDefinitionExists( + $policyName: String! + $category: String! + $stage: String! + $description: String! + $scheduledPolicy: Boolean! + $script: String! + $variables: String! + $conditionName: String! + $suggestion: String! + $orgId: String! 
+) { + queryPolicyDefinition( + filter: { + policyName: { eq: $policyName } + category: { eq: $category } + stage: { eq: $stage } + description: { eq: $description } + scheduledPolicy: $scheduledPolicy + script: { eq: $script } + variables: { eq: $variables } + conditionName: { eq: $conditionName } + suggestion: { eq: $suggestion } + } + ) @cascade { + id + ownerOrg(filter: { id: { eq: $orgId } }) { + id + } + } +} + +mutation updatePolicyDefinition( + $policyId: String! + $category: String! + $stage: String! + $description: String! + $scheduledPolicy: Boolean! + $script: String! + $variables: String! + $conditionName: String! + $suggestion: String! + $updatedAt: DateTime! +) { + updatePolicyDefinition( + input: { + filter: { id: { eq: $policyId } } + set: { + category: $category + stage: $stage + description: $description + scheduledPolicy: $scheduledPolicy + script: $script + variables: $variables + conditionName: $conditionName + suggestion: $suggestion + updatedAt: $updatedAt + } + } + ) { + numUids + } +} + +query checkIfPolicyEnforcementExists( + $datasourceTool: String! + $policyId: String! +) { + queryPolicyEnforcement(filter: { datasourceTool: { eq: $datasourceTool } }) @cascade(fields: ["policy"]) { + id + conditionValue + policy(filter: { id: { eq: $policyId } }) { + id + } + tags { + id + } + } +} + +mutation addPolicyEnforcement ($input: [AddPolicyEnforcementInput!]!) { + addPolicyEnforcement(input: $input) { + numUids + } +} + +mutation updatePolicyEnforcement( + $conditionValue: String! + $policyEnfId: ID! + $updatedAt: DateTime! +) { + updatePolicyEnforcement( + input: { + set: { conditionValue: $conditionValue, updatedAt: $updatedAt } + filter: { id: [$policyEnfId] } + } + ) { + numUids + } +} + +mutation assignTagsToPolicy( + $policyEnforcementId: ID! + $tags: TagRef! + $updatedAt: DateTime! 
+) { + updatePolicyEnforcement( + input: { + filter: { id: [$policyEnforcementId] } + set: { tags: [$tags], updatedAt: $updatedAt } + } + ) { + numUids + } +} diff --git a/policies/schema-generated.go b/policies/schema-generated.go new file mode 100644 index 0000000..f89ff9e --- /dev/null +++ b/policies/schema-generated.go @@ -0,0 +1,2486 @@ +// Code generated by github.com/Khan/genqlient, DO NOT EDIT. + +package policyingenstionscript + +import ( + "context" + "time" + + "github.com/Khan/genqlient/graphql" +) + +type AddPolicyDefinitionInput struct { + Id string `json:"id"` + OwnerOrg *OrganizationRef `json:"ownerOrg,omitempty"` + OwnerTeam *TeamRef `json:"ownerTeam,omitempty"` + OwnerApplication *ApplicationRef `json:"ownerApplication,omitempty"` + CreatedAt *time.Time `json:"createdAt"` + UpdatedAt *time.Time `json:"updatedAt"` + PolicyName string `json:"policyName"` + Category string `json:"category"` + Stage string `json:"stage"` + Description string `json:"description"` + ScheduledPolicy *bool `json:"scheduledPolicy"` + Script string `json:"script"` + Variables string `json:"variables"` + ConditionName string `json:"conditionName"` + Suggestion string `json:"suggestion"` +} + +// GetId returns AddPolicyDefinitionInput.Id, and is useful for accessing the field via an interface. +func (v *AddPolicyDefinitionInput) GetId() string { return v.Id } + +// GetOwnerOrg returns AddPolicyDefinitionInput.OwnerOrg, and is useful for accessing the field via an interface. +func (v *AddPolicyDefinitionInput) GetOwnerOrg() *OrganizationRef { return v.OwnerOrg } + +// GetOwnerTeam returns AddPolicyDefinitionInput.OwnerTeam, and is useful for accessing the field via an interface. +func (v *AddPolicyDefinitionInput) GetOwnerTeam() *TeamRef { return v.OwnerTeam } + +// GetOwnerApplication returns AddPolicyDefinitionInput.OwnerApplication, and is useful for accessing the field via an interface. 
+func (v *AddPolicyDefinitionInput) GetOwnerApplication() *ApplicationRef { return v.OwnerApplication } + +// GetCreatedAt returns AddPolicyDefinitionInput.CreatedAt, and is useful for accessing the field via an interface. +func (v *AddPolicyDefinitionInput) GetCreatedAt() *time.Time { return v.CreatedAt } + +// GetUpdatedAt returns AddPolicyDefinitionInput.UpdatedAt, and is useful for accessing the field via an interface. +func (v *AddPolicyDefinitionInput) GetUpdatedAt() *time.Time { return v.UpdatedAt } + +// GetPolicyName returns AddPolicyDefinitionInput.PolicyName, and is useful for accessing the field via an interface. +func (v *AddPolicyDefinitionInput) GetPolicyName() string { return v.PolicyName } + +// GetCategory returns AddPolicyDefinitionInput.Category, and is useful for accessing the field via an interface. +func (v *AddPolicyDefinitionInput) GetCategory() string { return v.Category } + +// GetStage returns AddPolicyDefinitionInput.Stage, and is useful for accessing the field via an interface. +func (v *AddPolicyDefinitionInput) GetStage() string { return v.Stage } + +// GetDescription returns AddPolicyDefinitionInput.Description, and is useful for accessing the field via an interface. +func (v *AddPolicyDefinitionInput) GetDescription() string { return v.Description } + +// GetScheduledPolicy returns AddPolicyDefinitionInput.ScheduledPolicy, and is useful for accessing the field via an interface. +func (v *AddPolicyDefinitionInput) GetScheduledPolicy() *bool { return v.ScheduledPolicy } + +// GetScript returns AddPolicyDefinitionInput.Script, and is useful for accessing the field via an interface. +func (v *AddPolicyDefinitionInput) GetScript() string { return v.Script } + +// GetVariables returns AddPolicyDefinitionInput.Variables, and is useful for accessing the field via an interface. 
+func (v *AddPolicyDefinitionInput) GetVariables() string { return v.Variables } + +// GetConditionName returns AddPolicyDefinitionInput.ConditionName, and is useful for accessing the field via an interface. +func (v *AddPolicyDefinitionInput) GetConditionName() string { return v.ConditionName } + +// GetSuggestion returns AddPolicyDefinitionInput.Suggestion, and is useful for accessing the field via an interface. +func (v *AddPolicyDefinitionInput) GetSuggestion() string { return v.Suggestion } + +type AddPolicyEnforcementInput struct { + Policy *PolicyDefinitionRef `json:"policy,omitempty"` + EnforcedOrg *OrganizationRef `json:"enforcedOrg,omitempty"` + EnforcedTeam *TeamRef `json:"enforcedTeam,omitempty"` + EnforcedApplication *ApplicationRef `json:"enforcedApplication,omitempty"` + Status *bool `json:"status"` + ForceApply *bool `json:"forceApply"` + Severity Severity `json:"severity"` + DatasourceTool string `json:"datasourceTool"` + Action string `json:"action"` + ConditionValue string `json:"conditionValue"` + Environments []*EnvironmentRef `json:"environments,omitempty"` + Tags []*TagRef `json:"tags,omitempty"` + CreatedAt *time.Time `json:"createdAt"` + UpdatedAt *time.Time `json:"updatedAt"` +} + +// GetPolicy returns AddPolicyEnforcementInput.Policy, and is useful for accessing the field via an interface. +func (v *AddPolicyEnforcementInput) GetPolicy() *PolicyDefinitionRef { return v.Policy } + +// GetEnforcedOrg returns AddPolicyEnforcementInput.EnforcedOrg, and is useful for accessing the field via an interface. +func (v *AddPolicyEnforcementInput) GetEnforcedOrg() *OrganizationRef { return v.EnforcedOrg } + +// GetEnforcedTeam returns AddPolicyEnforcementInput.EnforcedTeam, and is useful for accessing the field via an interface. 
+func (v *AddPolicyEnforcementInput) GetEnforcedTeam() *TeamRef { return v.EnforcedTeam } + +// GetEnforcedApplication returns AddPolicyEnforcementInput.EnforcedApplication, and is useful for accessing the field via an interface. +func (v *AddPolicyEnforcementInput) GetEnforcedApplication() *ApplicationRef { + return v.EnforcedApplication +} + +// GetStatus returns AddPolicyEnforcementInput.Status, and is useful for accessing the field via an interface. +func (v *AddPolicyEnforcementInput) GetStatus() *bool { return v.Status } + +// GetForceApply returns AddPolicyEnforcementInput.ForceApply, and is useful for accessing the field via an interface. +func (v *AddPolicyEnforcementInput) GetForceApply() *bool { return v.ForceApply } + +// GetSeverity returns AddPolicyEnforcementInput.Severity, and is useful for accessing the field via an interface. +func (v *AddPolicyEnforcementInput) GetSeverity() Severity { return v.Severity } + +// GetDatasourceTool returns AddPolicyEnforcementInput.DatasourceTool, and is useful for accessing the field via an interface. +func (v *AddPolicyEnforcementInput) GetDatasourceTool() string { return v.DatasourceTool } + +// GetAction returns AddPolicyEnforcementInput.Action, and is useful for accessing the field via an interface. +func (v *AddPolicyEnforcementInput) GetAction() string { return v.Action } + +// GetConditionValue returns AddPolicyEnforcementInput.ConditionValue, and is useful for accessing the field via an interface. +func (v *AddPolicyEnforcementInput) GetConditionValue() string { return v.ConditionValue } + +// GetEnvironments returns AddPolicyEnforcementInput.Environments, and is useful for accessing the field via an interface. +func (v *AddPolicyEnforcementInput) GetEnvironments() []*EnvironmentRef { return v.Environments } + +// GetTags returns AddPolicyEnforcementInput.Tags, and is useful for accessing the field via an interface. 
+func (v *AddPolicyEnforcementInput) GetTags() []*TagRef { return v.Tags } + +// GetCreatedAt returns AddPolicyEnforcementInput.CreatedAt, and is useful for accessing the field via an interface. +func (v *AddPolicyEnforcementInput) GetCreatedAt() *time.Time { return v.CreatedAt } + +// GetUpdatedAt returns AddPolicyEnforcementInput.UpdatedAt, and is useful for accessing the field via an interface. +func (v *AddPolicyEnforcementInput) GetUpdatedAt() *time.Time { return v.UpdatedAt } + +type AddTagInput struct { + Id string `json:"id"` + TagName string `json:"tagName"` + TagValue string `json:"tagValue"` + TagDescription string `json:"tagDescription"` + CreatedBy string `json:"createdBy"` + CreatedAt *time.Time `json:"createdAt"` + UpdatedAt *time.Time `json:"updatedAt"` + Policies []*PolicyEnforcementRef `json:"policies,omitempty"` +} + +// GetId returns AddTagInput.Id, and is useful for accessing the field via an interface. +func (v *AddTagInput) GetId() string { return v.Id } + +// GetTagName returns AddTagInput.TagName, and is useful for accessing the field via an interface. +func (v *AddTagInput) GetTagName() string { return v.TagName } + +// GetTagValue returns AddTagInput.TagValue, and is useful for accessing the field via an interface. +func (v *AddTagInput) GetTagValue() string { return v.TagValue } + +// GetTagDescription returns AddTagInput.TagDescription, and is useful for accessing the field via an interface. +func (v *AddTagInput) GetTagDescription() string { return v.TagDescription } + +// GetCreatedBy returns AddTagInput.CreatedBy, and is useful for accessing the field via an interface. +func (v *AddTagInput) GetCreatedBy() string { return v.CreatedBy } + +// GetCreatedAt returns AddTagInput.CreatedAt, and is useful for accessing the field via an interface. +func (v *AddTagInput) GetCreatedAt() *time.Time { return v.CreatedAt } + +// GetUpdatedAt returns AddTagInput.UpdatedAt, and is useful for accessing the field via an interface. 
+// NOTE(review): the accessor comment template below ("…is useful for accessing
+// the field via an interface.") matches genqlient's generated output — this file
+// is presumably generated; prefer regenerating over hand-editing. TODO confirm.
+func (v *AddTagInput) GetUpdatedAt() *time.Time { return v.UpdatedAt }
+
+// GetPolicies returns AddTagInput.Policies, and is useful for accessing the field via an interface.
+func (v *AddTagInput) GetPolicies() []*PolicyEnforcementRef { return v.Policies }
+
+// ApplicationDeploymentRef models one deployment event of an artifact into an
+// application environment (field meanings per the json tags below).
+type ApplicationDeploymentRef struct {
+	// id is randomly assigned
+	Id string `json:"id"`
+	Artifact []*ArtifactRef `json:"artifact,omitempty"`
+	ApplicationEnvironment *ApplicationEnvironmentRef `json:"applicationEnvironment,omitempty"`
+	DeployedAt *time.Time `json:"deployedAt"`
+	// deploymentStage is an enum and can be discovered, current, previous or blocked
+	DeploymentStage DeploymentStage `json:"deploymentStage"`
+	// source is argo, spinnaker etc
+	Source string `json:"source"`
+	// component would be a service
+	Component string `json:"component"`
+	// user who deployed the artifact
+	DeployedBy string `json:"deployedBy"`
+	ToolsUsed *ToolsUsedRef `json:"toolsUsed,omitempty"`
+	DeploymentRisk *ApplicationDeploymentRiskRef `json:"deploymentRisk,omitempty"`
+	PolicyRunHistory []*RunHistoryRef `json:"policyRunHistory,omitempty"`
+}
+
+// GetId returns ApplicationDeploymentRef.Id, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRef) GetId() string { return v.Id }
+
+// GetArtifact returns ApplicationDeploymentRef.Artifact, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRef) GetArtifact() []*ArtifactRef { return v.Artifact }
+
+// GetApplicationEnvironment returns ApplicationDeploymentRef.ApplicationEnvironment, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRef) GetApplicationEnvironment() *ApplicationEnvironmentRef {
+	return v.ApplicationEnvironment
+}
+
+// GetDeployedAt returns ApplicationDeploymentRef.DeployedAt, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRef) GetDeployedAt() *time.Time { return v.DeployedAt }
+
+// GetDeploymentStage returns ApplicationDeploymentRef.DeploymentStage, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRef) GetDeploymentStage() DeploymentStage { return v.DeploymentStage }
+
+// GetSource returns ApplicationDeploymentRef.Source, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRef) GetSource() string { return v.Source }
+
+// GetComponent returns ApplicationDeploymentRef.Component, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRef) GetComponent() string { return v.Component }
+
+// GetDeployedBy returns ApplicationDeploymentRef.DeployedBy, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRef) GetDeployedBy() string { return v.DeployedBy }
+
+// GetToolsUsed returns ApplicationDeploymentRef.ToolsUsed, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRef) GetToolsUsed() *ToolsUsedRef { return v.ToolsUsed }
+
+// GetDeploymentRisk returns ApplicationDeploymentRef.DeploymentRisk, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRef) GetDeploymentRisk() *ApplicationDeploymentRiskRef {
+	return v.DeploymentRisk
+}
+
+// GetPolicyRunHistory returns ApplicationDeploymentRef.PolicyRunHistory, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRef) GetPolicyRunHistory() []*RunHistoryRef { return v.PolicyRunHistory }
+
+// ApplicationDeploymentRiskRef carries per-category alert scores for one deployment.
+// Score fields are pointer-typed so JSON null/absent is distinguishable from 0.
+type ApplicationDeploymentRiskRef struct {
+	Id *string `json:"id"`
+	SourceCodeAlertsScore *int `json:"sourceCodeAlertsScore"`
+	BuildAlertsScore *int `json:"buildAlertsScore"`
+	ArtifactAlertsScore *int `json:"artifactAlertsScore"`
+	DeploymentAlertsScore *int `json:"deploymentAlertsScore"`
+	DeploymentRiskStatus RiskStatus `json:"deploymentRiskStatus"`
+	ApplicationDeployment *ApplicationDeploymentRef `json:"applicationDeployment,omitempty"`
+}
+
+// GetId returns ApplicationDeploymentRiskRef.Id, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRiskRef) GetId() *string { return v.Id }
+
+// GetSourceCodeAlertsScore returns ApplicationDeploymentRiskRef.SourceCodeAlertsScore, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRiskRef) GetSourceCodeAlertsScore() *int {
+	return v.SourceCodeAlertsScore
+}
+
+// GetBuildAlertsScore returns ApplicationDeploymentRiskRef.BuildAlertsScore, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRiskRef) GetBuildAlertsScore() *int { return v.BuildAlertsScore }
+
+// GetArtifactAlertsScore returns ApplicationDeploymentRiskRef.ArtifactAlertsScore, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRiskRef) GetArtifactAlertsScore() *int { return v.ArtifactAlertsScore }
+
+// GetDeploymentAlertsScore returns ApplicationDeploymentRiskRef.DeploymentAlertsScore, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRiskRef) GetDeploymentAlertsScore() *int {
+	return v.DeploymentAlertsScore
+}
+
+// GetDeploymentRiskStatus returns ApplicationDeploymentRiskRef.DeploymentRiskStatus, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRiskRef) GetDeploymentRiskStatus() RiskStatus {
+	return v.DeploymentRiskStatus
+}
+
+// GetApplicationDeployment returns ApplicationDeploymentRiskRef.ApplicationDeployment, and is useful for accessing the field via an interface.
+func (v *ApplicationDeploymentRiskRef) GetApplicationDeployment() *ApplicationDeploymentRef {
+	return v.ApplicationDeployment
+}
+
+// ApplicationEnvironmentRef links an application to an environment/deployment
+// target and records the deployments and risk status observed there.
+type ApplicationEnvironmentRef struct {
+	// id is randomly assigned
+	Id string `json:"id"`
+	Environment *EnvironmentRef `json:"environment,omitempty"`
+	Application *ApplicationRef `json:"application,omitempty"`
+	DeploymentTarget *DeploymentTargetRef `json:"deploymentTarget,omitempty"`
+	Namespace string `json:"namespace"`
+	// toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env
+	// NOTE(review): the comment above says "comma-separated string" but the Go
+	// type is []string — the comment and the schema disagree; verify which is current.
+	ToolsUsed []string `json:"toolsUsed"`
+	Deployments []*ApplicationDeploymentRef `json:"deployments,omitempty"`
+	RiskStatus *ApplicationRiskStatusRef `json:"riskStatus,omitempty"`
+	Metadata []*KeyValueRef `json:"metadata,omitempty"`
+}
+
+// GetId returns ApplicationEnvironmentRef.Id, and is useful for accessing the field via an interface.
+func (v *ApplicationEnvironmentRef) GetId() string { return v.Id }
+
+// GetEnvironment returns ApplicationEnvironmentRef.Environment, and is useful for accessing the field via an interface.
+func (v *ApplicationEnvironmentRef) GetEnvironment() *EnvironmentRef { return v.Environment }
+
+// GetApplication returns ApplicationEnvironmentRef.Application, and is useful for accessing the field via an interface.
+func (v *ApplicationEnvironmentRef) GetApplication() *ApplicationRef { return v.Application }
+
+// GetDeploymentTarget returns ApplicationEnvironmentRef.DeploymentTarget, and is useful for accessing the field via an interface.
+func (v *ApplicationEnvironmentRef) GetDeploymentTarget() *DeploymentTargetRef {
+	return v.DeploymentTarget
+}
+
+// GetNamespace returns ApplicationEnvironmentRef.Namespace, and is useful for accessing the field via an interface.
+func (v *ApplicationEnvironmentRef) GetNamespace() string { return v.Namespace }
+
+// GetToolsUsed returns ApplicationEnvironmentRef.ToolsUsed, and is useful for accessing the field via an interface.
+func (v *ApplicationEnvironmentRef) GetToolsUsed() []string { return v.ToolsUsed }
+
+// GetDeployments returns ApplicationEnvironmentRef.Deployments, and is useful for accessing the field via an interface.
+func (v *ApplicationEnvironmentRef) GetDeployments() []*ApplicationDeploymentRef {
+	return v.Deployments
+}
+
+// GetRiskStatus returns ApplicationEnvironmentRef.RiskStatus, and is useful for accessing the field via an interface.
+func (v *ApplicationEnvironmentRef) GetRiskStatus() *ApplicationRiskStatusRef { return v.RiskStatus }
+
+// GetMetadata returns ApplicationEnvironmentRef.Metadata, and is useful for accessing the field via an interface.
+func (v *ApplicationEnvironmentRef) GetMetadata() []*KeyValueRef { return v.Metadata }
+
+// ApplicationRef models an application with its roles, environments, team,
+// and attached policies/enforcements.
+type ApplicationRef struct {
+	// id is randomly assigned
+	Id string `json:"id"`
+	Name string `json:"name"`
+	Roles []*RoleRef `json:"roles,omitempty"`
+	Environments []*ApplicationEnvironmentRef `json:"environments,omitempty"`
+	Team *TeamRef `json:"team,omitempty"`
+	Policies []*PolicyDefinitionRef `json:"policies,omitempty"`
+	PolicyEnforcements []*PolicyEnforcementRef `json:"policyEnforcements,omitempty"`
+	Metadata []*KeyValueRef `json:"metadata,omitempty"`
+}
+
+// GetId returns ApplicationRef.Id, and is useful for accessing the field via an interface.
+func (v *ApplicationRef) GetId() string { return v.Id }
+
+// GetName returns ApplicationRef.Name, and is useful for accessing the field via an interface.
+func (v *ApplicationRef) GetName() string { return v.Name }
+
+// GetRoles returns ApplicationRef.Roles, and is useful for accessing the field via an interface.
+func (v *ApplicationRef) GetRoles() []*RoleRef { return v.Roles }
+
+// GetEnvironments returns ApplicationRef.Environments, and is useful for accessing the field via an interface.
+func (v *ApplicationRef) GetEnvironments() []*ApplicationEnvironmentRef { return v.Environments }
+
+// GetTeam returns ApplicationRef.Team, and is useful for accessing the field via an interface.
+func (v *ApplicationRef) GetTeam() *TeamRef { return v.Team }
+
+// GetPolicies returns ApplicationRef.Policies, and is useful for accessing the field via an interface.
+func (v *ApplicationRef) GetPolicies() []*PolicyDefinitionRef { return v.Policies }
+
+// GetPolicyEnforcements returns ApplicationRef.PolicyEnforcements, and is useful for accessing the field via an interface.
+func (v *ApplicationRef) GetPolicyEnforcements() []*PolicyEnforcementRef { return v.PolicyEnforcements }
+
+// GetMetadata returns ApplicationRef.Metadata, and is useful for accessing the field via an interface.
+func (v *ApplicationRef) GetMetadata() []*KeyValueRef { return v.Metadata }
+
+// ApplicationRiskStatusRef holds per-category alert counts and an overall risk
+// status for one application environment. Alert-count fields are pointer-typed
+// so JSON null/absent is distinguishable from 0.
+type ApplicationRiskStatusRef struct {
+	Id *string `json:"id"`
+	RiskStatus RiskStatus `json:"riskStatus"`
+	SourceCodeAlerts *int `json:"sourceCodeAlerts"`
+	BuildAlerts *int `json:"buildAlerts"`
+	ArtifactAlerts *int `json:"artifactAlerts"`
+	DeploymentAlerts *int `json:"deploymentAlerts"`
+	CreatedAt *time.Time `json:"createdAt"`
+	UpdatedAt *time.Time `json:"updatedAt"`
+	ApplicationEnvironment *ApplicationEnvironmentRef `json:"applicationEnvironment,omitempty"`
+}
+
+// GetId returns ApplicationRiskStatusRef.Id, and is useful for accessing the field via an interface.
+func (v *ApplicationRiskStatusRef) GetId() *string { return v.Id }
+
+// GetRiskStatus returns ApplicationRiskStatusRef.RiskStatus, and is useful for accessing the field via an interface.
+func (v *ApplicationRiskStatusRef) GetRiskStatus() RiskStatus { return v.RiskStatus }
+
+// GetSourceCodeAlerts returns ApplicationRiskStatusRef.SourceCodeAlerts, and is useful for accessing the field via an interface.
+func (v *ApplicationRiskStatusRef) GetSourceCodeAlerts() *int { return v.SourceCodeAlerts }
+
+// GetBuildAlerts returns ApplicationRiskStatusRef.BuildAlerts, and is useful for accessing the field via an interface.
+func (v *ApplicationRiskStatusRef) GetBuildAlerts() *int { return v.BuildAlerts }
+
+// GetArtifactAlerts returns ApplicationRiskStatusRef.ArtifactAlerts, and is useful for accessing the field via an interface.
+func (v *ApplicationRiskStatusRef) GetArtifactAlerts() *int { return v.ArtifactAlerts }
+
+// GetDeploymentAlerts returns ApplicationRiskStatusRef.DeploymentAlerts, and is useful for accessing the field via an interface.
+func (v *ApplicationRiskStatusRef) GetDeploymentAlerts() *int { return v.DeploymentAlerts }
+
+// GetCreatedAt returns ApplicationRiskStatusRef.CreatedAt, and is useful for accessing the field via an interface.
+func (v *ApplicationRiskStatusRef) GetCreatedAt() *time.Time { return v.CreatedAt }
+
+// GetUpdatedAt returns ApplicationRiskStatusRef.UpdatedAt, and is useful for accessing the field via an interface.
+func (v *ApplicationRiskStatusRef) GetUpdatedAt() *time.Time { return v.UpdatedAt }
+
+// GetApplicationEnvironment returns ApplicationRiskStatusRef.ApplicationEnvironment, and is useful for accessing the field via an interface.
+func (v *ApplicationRiskStatusRef) GetApplicationEnvironment() *ApplicationEnvironmentRef {
+	return v.ApplicationEnvironment
+}
+
+// ArtifactRef identifies a built artifact (name/tag/sha) and links it to its
+// scan data, deployments, and build details.
+type ArtifactRef struct {
+	Id string `json:"id"`
+	ArtifactType string `json:"artifactType"`
+	ArtifactName string `json:"artifactName"`
+	ArtifactTag string `json:"artifactTag"`
+	ArtifactSha string `json:"artifactSha"`
+	ScanData []*ArtifactScanDataRef `json:"scanData,omitempty"`
+	ArtifactDeployment []*ApplicationDeploymentRef `json:"artifactDeployment,omitempty"`
+	BuildDetails *BuildToolRef `json:"buildDetails,omitempty"`
+}
+
+// GetId returns ArtifactRef.Id, and is useful for accessing the field via an interface.
+func (v *ArtifactRef) GetId() string { return v.Id }
+
+// GetArtifactType returns ArtifactRef.ArtifactType, and is useful for accessing the field via an interface.
+func (v *ArtifactRef) GetArtifactType() string { return v.ArtifactType }
+
+// GetArtifactName returns ArtifactRef.ArtifactName, and is useful for accessing the field via an interface.
+func (v *ArtifactRef) GetArtifactName() string { return v.ArtifactName }
+
+// GetArtifactTag returns ArtifactRef.ArtifactTag, and is useful for accessing the field via an interface.
+func (v *ArtifactRef) GetArtifactTag() string { return v.ArtifactTag }
+
+// GetArtifactSha returns ArtifactRef.ArtifactSha, and is useful for accessing the field via an interface.
+func (v *ArtifactRef) GetArtifactSha() string { return v.ArtifactSha }
+
+// GetScanData returns ArtifactRef.ScanData, and is useful for accessing the field via an interface.
+func (v *ArtifactRef) GetScanData() []*ArtifactScanDataRef { return v.ScanData }
+
+// GetArtifactDeployment returns ArtifactRef.ArtifactDeployment, and is useful for accessing the field via an interface.
+func (v *ArtifactRef) GetArtifactDeployment() []*ApplicationDeploymentRef {
+	return v.ArtifactDeployment
+}
+
+// GetBuildDetails returns ArtifactRef.BuildDetails, and is useful for accessing the field via an interface.
+func (v *ArtifactRef) GetBuildDetails() *BuildToolRef { return v.BuildDetails }
+
+// ArtifactScanDataRef aggregates one tool's scan output for an artifact sha:
+// vulnerability counts by severity (pointer-typed so null/absent differs from 0)
+// plus URLs pointing at the individual scan reports.
+type ArtifactScanDataRef struct {
+	Id string `json:"id"`
+	ArtifactSha string `json:"artifactSha"`
+	Tool string `json:"tool"`
+	ArtifactDetails *ArtifactRef `json:"artifactDetails,omitempty"`
+	LastScannedAt *time.Time `json:"lastScannedAt"`
+	CreatedAt *time.Time `json:"createdAt"`
+	VulnTrackingId string `json:"vulnTrackingId"`
+	Components []*ComponentRef `json:"components,omitempty"`
+	VulnCriticalCount *int `json:"vulnCriticalCount"`
+	VulnHighCount *int `json:"vulnHighCount"`
+	VulnMediumCount *int `json:"vulnMediumCount"`
+	VulnLowCount *int `json:"vulnLowCount"`
+	VulnInfoCount *int `json:"vulnInfoCount"`
+	VulnUnknownCount *int `json:"vulnUnknownCount"`
+	VulnNoneCount *int `json:"vulnNoneCount"`
+	VulnTotalCount *int `json:"vulnTotalCount"`
+	SbomUrl string `json:"sbomUrl"`
+	ArtifactLicenseScanUrl string `json:"artifactLicenseScanUrl"`
+	ArtifactSecretScanUrl string `json:"artifactSecretScanUrl"`
+	SourceLicenseScanUrl string `json:"sourceLicenseScanUrl"`
+	SourceSecretScanUrl string `json:"sourceSecretScanUrl"`
+	SourceScorecardScanUrl string `json:"sourceScorecardScanUrl"`
+	SourceSemgrepHighSeverityScanUrl string `json:"sourceSemgrepHighSeverityScanUrl"`
+	SourceSemgrepMediumSeverityScanUrl string `json:"sourceSemgrepMediumSeverityScanUrl"`
+	SourceSemgrepLowSeverityScanUrl string `json:"sourceSemgrepLowSeverityScanUrl"`
+	SourceSnykScanUrl string `json:"sourceSnykScanUrl"`
+	VirusTotalUrlScan string `json:"virusTotalUrlScan"`
+	RiskStatus RiskStatus `json:"riskStatus"`
+	ArtifactRunHistory []*RunHistoryRef `json:"artifactRunHistory,omitempty"`
+}
+
+// GetId returns ArtifactScanDataRef.Id, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetId() string { return v.Id }
+
+// GetArtifactSha returns ArtifactScanDataRef.ArtifactSha, and is useful for accessing the field via an interface.
+// The accessors below are trivial field getters for ArtifactScanDataRef,
+// emitted for interface satisfaction.
+func (v *ArtifactScanDataRef) GetArtifactSha() string { return v.ArtifactSha }
+
+// GetTool returns ArtifactScanDataRef.Tool, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetTool() string { return v.Tool }
+
+// GetArtifactDetails returns ArtifactScanDataRef.ArtifactDetails, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetArtifactDetails() *ArtifactRef { return v.ArtifactDetails }
+
+// GetLastScannedAt returns ArtifactScanDataRef.LastScannedAt, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetLastScannedAt() *time.Time { return v.LastScannedAt }
+
+// GetCreatedAt returns ArtifactScanDataRef.CreatedAt, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetCreatedAt() *time.Time { return v.CreatedAt }
+
+// GetVulnTrackingId returns ArtifactScanDataRef.VulnTrackingId, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetVulnTrackingId() string { return v.VulnTrackingId }
+
+// GetComponents returns ArtifactScanDataRef.Components, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetComponents() []*ComponentRef { return v.Components }
+
+// GetVulnCriticalCount returns ArtifactScanDataRef.VulnCriticalCount, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetVulnCriticalCount() *int { return v.VulnCriticalCount }
+
+// GetVulnHighCount returns ArtifactScanDataRef.VulnHighCount, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetVulnHighCount() *int { return v.VulnHighCount }
+
+// GetVulnMediumCount returns ArtifactScanDataRef.VulnMediumCount, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetVulnMediumCount() *int { return v.VulnMediumCount }
+
+// GetVulnLowCount returns ArtifactScanDataRef.VulnLowCount, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetVulnLowCount() *int { return v.VulnLowCount }
+
+// GetVulnInfoCount returns ArtifactScanDataRef.VulnInfoCount, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetVulnInfoCount() *int { return v.VulnInfoCount }
+
+// GetVulnUnknownCount returns ArtifactScanDataRef.VulnUnknownCount, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetVulnUnknownCount() *int { return v.VulnUnknownCount }
+
+// GetVulnNoneCount returns ArtifactScanDataRef.VulnNoneCount, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetVulnNoneCount() *int { return v.VulnNoneCount }
+
+// GetVulnTotalCount returns ArtifactScanDataRef.VulnTotalCount, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetVulnTotalCount() *int { return v.VulnTotalCount }
+
+// GetSbomUrl returns ArtifactScanDataRef.SbomUrl, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetSbomUrl() string { return v.SbomUrl }
+
+// GetArtifactLicenseScanUrl returns ArtifactScanDataRef.ArtifactLicenseScanUrl, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetArtifactLicenseScanUrl() string { return v.ArtifactLicenseScanUrl }
+
+// GetArtifactSecretScanUrl returns ArtifactScanDataRef.ArtifactSecretScanUrl, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetArtifactSecretScanUrl() string { return v.ArtifactSecretScanUrl }
+
+// GetSourceLicenseScanUrl returns ArtifactScanDataRef.SourceLicenseScanUrl, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetSourceLicenseScanUrl() string { return v.SourceLicenseScanUrl }
+
+// GetSourceSecretScanUrl returns ArtifactScanDataRef.SourceSecretScanUrl, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetSourceSecretScanUrl() string { return v.SourceSecretScanUrl }
+
+// GetSourceScorecardScanUrl returns ArtifactScanDataRef.SourceScorecardScanUrl, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetSourceScorecardScanUrl() string { return v.SourceScorecardScanUrl }
+
+// GetSourceSemgrepHighSeverityScanUrl returns ArtifactScanDataRef.SourceSemgrepHighSeverityScanUrl, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetSourceSemgrepHighSeverityScanUrl() string {
+	return v.SourceSemgrepHighSeverityScanUrl
+}
+
+// GetSourceSemgrepMediumSeverityScanUrl returns ArtifactScanDataRef.SourceSemgrepMediumSeverityScanUrl, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetSourceSemgrepMediumSeverityScanUrl() string {
+	return v.SourceSemgrepMediumSeverityScanUrl
+}
+
+// GetSourceSemgrepLowSeverityScanUrl returns ArtifactScanDataRef.SourceSemgrepLowSeverityScanUrl, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetSourceSemgrepLowSeverityScanUrl() string {
+	return v.SourceSemgrepLowSeverityScanUrl
+}
+
+// GetSourceSnykScanUrl returns ArtifactScanDataRef.SourceSnykScanUrl, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetSourceSnykScanUrl() string { return v.SourceSnykScanUrl }
+
+// GetVirusTotalUrlScan returns ArtifactScanDataRef.VirusTotalUrlScan, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetVirusTotalUrlScan() string { return v.VirusTotalUrlScan }
+
+// GetRiskStatus returns ArtifactScanDataRef.RiskStatus, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetRiskStatus() RiskStatus { return v.RiskStatus }
+
+// GetArtifactRunHistory returns ArtifactScanDataRef.ArtifactRunHistory, and is useful for accessing the field via an interface.
+func (v *ArtifactScanDataRef) GetArtifactRunHistory() []*RunHistoryRef { return v.ArtifactRunHistory }
+
+// BuildToolRef records how an artifact was produced: the CI job that built it,
+// the resulting artifact/tag/digest, who built it and when, and links back to
+// the source-code tool and commit metadata.
+type BuildToolRef struct {
+	// id is randomly assigned
+	Id string `json:"id"`
+	// buildId is a unique job id, run id for a job/pipeline/action
+	BuildId string `json:"buildId"`
+	// tool is jenkins etc
+	Tool string `json:"tool"`
+	// buildName is the name of the job/pipeline/action
+	BuildName string `json:"buildName"`
+	BuildUrl string `json:"buildUrl"`
+	ArtifactType string `json:"artifactType"`
+	// artifact would be something like nginx without the tag
+	Artifact string `json:"artifact"`
+	// artifactTag would be the tag of the artifact
+	ArtifactTag string `json:"artifactTag"`
+	// digest is the sha of the artifact
+	Digest string `json:"digest"`
+	// buildDigest is the sha of the artifact as sent from the build tool
+	BuildDigest string `json:"buildDigest"`
+	ArtifactNode *ArtifactRef `json:"artifactNode,omitempty"`
+	// buildTime is the time at which the artifact was built
+	BuildTime *time.Time `json:"buildTime"`
+	// buildUser is the user that built the artifact
+	BuildUser string `json:"buildUser"`
+	SourceCodeTool *SourceCodeToolRef `json:"sourceCodeTool,omitempty"`
+	CommitMetaData []*CommitMetaDataRef `json:"commitMetaData,omitempty"`
+	CreatedAt *time.Time `json:"createdAt"`
+}
+
+// GetId returns BuildToolRef.Id, and is useful for accessing the field via an interface.
+func (v *BuildToolRef) GetId() string { return v.Id }
+
+// GetBuildId returns BuildToolRef.BuildId, and is useful for accessing the field via an interface.
+func (v *BuildToolRef) GetBuildId() string { return v.BuildId }
+
+// GetTool returns BuildToolRef.Tool, and is useful for accessing the field via an interface.
+func (v *BuildToolRef) GetTool() string { return v.Tool }
+
+// GetBuildName returns BuildToolRef.BuildName, and is useful for accessing the field via an interface.
+func (v *BuildToolRef) GetBuildName() string { return v.BuildName }
+
+// GetBuildUrl returns BuildToolRef.BuildUrl, and is useful for accessing the field via an interface.
+func (v *BuildToolRef) GetBuildUrl() string { return v.BuildUrl }
+
+// GetArtifactType returns BuildToolRef.ArtifactType, and is useful for accessing the field via an interface.
+func (v *BuildToolRef) GetArtifactType() string { return v.ArtifactType }
+
+// GetArtifact returns BuildToolRef.Artifact, and is useful for accessing the field via an interface.
+func (v *BuildToolRef) GetArtifact() string { return v.Artifact }
+
+// GetArtifactTag returns BuildToolRef.ArtifactTag, and is useful for accessing the field via an interface.
+func (v *BuildToolRef) GetArtifactTag() string { return v.ArtifactTag }
+
+// GetDigest returns BuildToolRef.Digest, and is useful for accessing the field via an interface.
+func (v *BuildToolRef) GetDigest() string { return v.Digest }
+
+// GetBuildDigest returns BuildToolRef.BuildDigest, and is useful for accessing the field via an interface.
+func (v *BuildToolRef) GetBuildDigest() string { return v.BuildDigest }
+
+// GetArtifactNode returns BuildToolRef.ArtifactNode, and is useful for accessing the field via an interface.
+func (v *BuildToolRef) GetArtifactNode() *ArtifactRef { return v.ArtifactNode }
+
+// GetBuildTime returns BuildToolRef.BuildTime, and is useful for accessing the field via an interface.
+func (v *BuildToolRef) GetBuildTime() *time.Time { return v.BuildTime }
+
+// GetBuildUser returns BuildToolRef.BuildUser, and is useful for accessing the field via an interface.
+func (v *BuildToolRef) GetBuildUser() string { return v.BuildUser }
+
+// GetSourceCodeTool returns BuildToolRef.SourceCodeTool, and is useful for accessing the field via an interface.
+func (v *BuildToolRef) GetSourceCodeTool() *SourceCodeToolRef { return v.SourceCodeTool }
+
+// GetCommitMetaData returns BuildToolRef.CommitMetaData, and is useful for accessing the field via an interface.
+func (v *BuildToolRef) GetCommitMetaData() []*CommitMetaDataRef { return v.CommitMetaData }
+
+// GetCreatedAt returns BuildToolRef.CreatedAt, and is useful for accessing the field via an interface.
+func (v *BuildToolRef) GetCreatedAt() *time.Time { return v.CreatedAt }
+
+// CWERef holds an id/name/description triple — presumably a Common Weakness
+// Enumeration entry, going by the name; confirm against the GraphQL schema.
+type CWERef struct {
+	Id string `json:"id"`
+	Name string `json:"name"`
+	Description string `json:"description"`
+}
+
+// GetId returns CWERef.Id, and is useful for accessing the field via an interface.
+func (v *CWERef) GetId() string { return v.Id }
+
+// GetName returns CWERef.Name, and is useful for accessing the field via an interface.
+func (v *CWERef) GetName() string { return v.Name }
+
+// GetDescription returns CWERef.Description, and is useful for accessing the field via an interface.
+func (v *CWERef) GetDescription() string { return v.Description }
+
+// CommitMetaDataRef captures review/signing metadata for the commit that an
+// artifact was built from.
+type CommitMetaDataRef struct {
+	// id is randomly assigned
+	Id *string `json:"id"`
+	// commit is a git commit that was used to build an artifact
+	Commit string `json:"commit"`
+	Repository string `json:"repository"`
+	// commitSign tells us whether the commit is signed
+	CommitSign *bool `json:"commitSign"`
+	NoOfReviewersConf *int `json:"noOfReviewersConf"`
+	ReviewerList []string `json:"reviewerList"`
+	ApproverList []string `json:"approverList"`
+	BuildTool *BuildToolRef `json:"buildTool,omitempty"`
+}
+
+// GetId returns CommitMetaDataRef.Id, and is useful for accessing the field via an interface.
+func (v *CommitMetaDataRef) GetId() *string { return v.Id }
+
+// GetCommit returns CommitMetaDataRef.Commit, and is useful for accessing the field via an interface.
+func (v *CommitMetaDataRef) GetCommit() string { return v.Commit }
+
+// GetRepository returns CommitMetaDataRef.Repository, and is useful for accessing the field via an interface.
+func (v *CommitMetaDataRef) GetRepository() string { return v.Repository }
+
+// GetCommitSign returns CommitMetaDataRef.CommitSign, and is useful for accessing the field via an interface.
+func (v *CommitMetaDataRef) GetCommitSign() *bool { return v.CommitSign }
+
+// GetNoOfReviewersConf returns CommitMetaDataRef.NoOfReviewersConf, and is useful for accessing the field via an interface.
+func (v *CommitMetaDataRef) GetNoOfReviewersConf() *int { return v.NoOfReviewersConf }
+
+// GetReviewerList returns CommitMetaDataRef.ReviewerList, and is useful for accessing the field via an interface.
+func (v *CommitMetaDataRef) GetReviewerList() []string { return v.ReviewerList }
+
+// GetApproverList returns CommitMetaDataRef.ApproverList, and is useful for accessing the field via an interface.
+func (v *CommitMetaDataRef) GetApproverList() []string { return v.ApproverList }
+
+// GetBuildTool returns CommitMetaDataRef.BuildTool, and is useful for accessing the field via an interface.
+func (v *CommitMetaDataRef) GetBuildTool() *BuildToolRef { return v.BuildTool }
+
+// ComponentRef describes one package/component found by a scan: purl/cpe
+// identifiers, licenses, and its linked vulnerabilities and scan records.
+type ComponentRef struct {
+	Id string `json:"id"`
+	Type string `json:"type"`
+	Name string `json:"name"`
+	Version string `json:"version"`
+	Licenses []string `json:"licenses"`
+	Purl string `json:"purl"`
+	Cpe string `json:"cpe"`
+	ScannedAt *time.Time `json:"scannedAt"`
+	Vulnerabilities []*VulnerabilityRef `json:"vulnerabilities,omitempty"`
+	Artifacts []*ArtifactScanDataRef `json:"artifacts,omitempty"`
+}
+
+// GetId returns ComponentRef.Id, and is useful for accessing the field via an interface.
+func (v *ComponentRef) GetId() string { return v.Id }
+
+// GetType returns ComponentRef.Type, and is useful for accessing the field via an interface.
+func (v *ComponentRef) GetType() string { return v.Type }
+
+// GetName returns ComponentRef.Name, and is useful for accessing the field via an interface.
+func (v *ComponentRef) GetName() string { return v.Name }
+
+// GetVersion returns ComponentRef.Version, and is useful for accessing the field via an interface.
+func (v *ComponentRef) GetVersion() string { return v.Version }
+
+// GetLicenses returns ComponentRef.Licenses, and is useful for accessing the field via an interface.
+func (v *ComponentRef) GetLicenses() []string { return v.Licenses }
+
+// GetPurl returns ComponentRef.Purl, and is useful for accessing the field via an interface.
+func (v *ComponentRef) GetPurl() string { return v.Purl }
+
+// GetCpe returns ComponentRef.Cpe, and is useful for accessing the field via an interface.
+func (v *ComponentRef) GetCpe() string { return v.Cpe }
+
+// GetScannedAt returns ComponentRef.ScannedAt, and is useful for accessing the field via an interface.
+func (v *ComponentRef) GetScannedAt() *time.Time { return v.ScannedAt }
+
+// GetVulnerabilities returns ComponentRef.Vulnerabilities, and is useful for accessing the field via an interface.
+func (v *ComponentRef) GetVulnerabilities() []*VulnerabilityRef { return v.Vulnerabilities }
+
+// GetArtifacts returns ComponentRef.Artifacts, and is useful for accessing the field via an interface.
+func (v *ComponentRef) GetArtifacts() []*ArtifactScanDataRef { return v.Artifacts }
+
+// CredentialsRef stores an integrator's credential payload.
+// NOTE(review): Data is an opaque string here — whether it is encrypted or
+// plaintext is not visible in this file; verify handling before logging it.
+type CredentialsRef struct {
+	Id *string `json:"id"`
+	Data string `json:"data"`
+	Integrator *IntegratorRef `json:"integrator,omitempty"`
+}
+
+// GetId returns CredentialsRef.Id, and is useful for accessing the field via an interface.
+func (v *CredentialsRef) GetId() *string { return v.Id }
+
+// GetData returns CredentialsRef.Data, and is useful for accessing the field via an interface.
+func (v *CredentialsRef) GetData() string { return v.Data }
+
+// GetIntegrator returns CredentialsRef.Integrator, and is useful for accessing the field via an interface.
+func (v *CredentialsRef) GetIntegrator() *IntegratorRef { return v.Integrator }
+
+// DeploymentStage is an enum denoting the stage of the deployment. .
+type DeploymentStage string
+
+const (
+	// deployment is discovered from the events
+	DeploymentStageDiscovered DeploymentStage = "discovered"
+	// scanning is under process
+	DeploymentStageScanning DeploymentStage = "scanning"
+	// deployment is known to have passed the deployment firewall and the deployment(ie the artifact) is live
+	DeploymentStageCurrent DeploymentStage = "current"
+	// deployment becomes a past deployment because another fresh deployment has happened
+	DeploymentStagePrevious DeploymentStage = "previous"
+	// deployment is blocked by the firewall
+	DeploymentStageBlocked DeploymentStage = "blocked"
+)
+
+// DeploymentTargetRef describes where deployments land: a named target with
+// address, cloud account/region, and its owning organization.
+type DeploymentTargetRef struct {
+	// id is randomly assigned
+	Id string `json:"id"`
+	Name string `json:"name"`
+	// this would be the ip/server address of the target environment
+	Ip string `json:"ip"`
+	Account string `json:"account"`
+	// this would be something like aws, gcp etc
+	TargetType string `json:"targetType"`
+	// this would be something like us-east-1 etc
+	Region string `json:"region"`
+	// NOTE(review): declared as string rather than *bool — verify whether
+	// "true"/"false" strings are intended by the schema.
+	KubescapeServiceConnected string `json:"kubescapeServiceConnected"`
+	IsFirewall *bool `json:"isFirewall"`
+	Organization *OrganizationRef `json:"organization,omitempty"`
+	DefaultEnvironment *EnvironmentRef `json:"defaultEnvironment,omitempty"`
+}
+
+// GetId returns DeploymentTargetRef.Id, and is useful for accessing the field via an interface.
+func (v *DeploymentTargetRef) GetId() string { return v.Id }
+
+// GetName returns DeploymentTargetRef.Name, and is useful for accessing the field via an interface.
+func (v *DeploymentTargetRef) GetName() string { return v.Name }
+
+// GetIp returns DeploymentTargetRef.Ip, and is useful for accessing the field via an interface.
+func (v *DeploymentTargetRef) GetIp() string { return v.Ip }
+
+// GetAccount returns DeploymentTargetRef.Account, and is useful for accessing the field via an interface.
+func (v *DeploymentTargetRef) GetAccount() string { return v.Account }
+
+// GetTargetType returns DeploymentTargetRef.TargetType, and is useful for accessing the field via an interface.
+func (v *DeploymentTargetRef) GetTargetType() string { return v.TargetType }
+
+// GetRegion returns DeploymentTargetRef.Region, and is useful for accessing the field via an interface.
+func (v *DeploymentTargetRef) GetRegion() string { return v.Region }
+
+// GetKubescapeServiceConnected returns DeploymentTargetRef.KubescapeServiceConnected, and is useful for accessing the field via an interface.
+func (v *DeploymentTargetRef) GetKubescapeServiceConnected() string {
+	return v.KubescapeServiceConnected
+}
+
+// GetIsFirewall returns DeploymentTargetRef.IsFirewall, and is useful for accessing the field via an interface.
+func (v *DeploymentTargetRef) GetIsFirewall() *bool { return v.IsFirewall }
+
+// GetOrganization returns DeploymentTargetRef.Organization, and is useful for accessing the field via an interface.
+func (v *DeploymentTargetRef) GetOrganization() *OrganizationRef { return v.Organization }
+
+// GetDefaultEnvironment returns DeploymentTargetRef.DefaultEnvironment, and is useful for accessing the field via an interface.
+func (v *DeploymentTargetRef) GetDefaultEnvironment() *EnvironmentRef { return v.DefaultEnvironment }
+
+// EnvironmentRef names a purpose-scoped environment within an organization.
+type EnvironmentRef struct {
+	Id string `json:"id"`
+	Organization *OrganizationRef `json:"organization,omitempty"`
+	Purpose string `json:"purpose"`
+}
+
+// GetId returns EnvironmentRef.Id, and is useful for accessing the field via an interface.
+func (v *EnvironmentRef) GetId() string { return v.Id } + +// GetOrganization returns EnvironmentRef.Organization, and is useful for accessing the field via an interface. +func (v *EnvironmentRef) GetOrganization() *OrganizationRef { return v.Organization } + +// GetPurpose returns EnvironmentRef.Purpose, and is useful for accessing the field via an interface. +func (v *EnvironmentRef) GetPurpose() string { return v.Purpose } + +type FeatureModeRef struct { + Id string `json:"id"` + Organization *OrganizationRef `json:"organization,omitempty"` + Scan string `json:"scan"` + Type string `json:"type"` + Enabled *bool `json:"enabled"` + Category string `json:"category"` + CreatedAt *time.Time `json:"createdAt"` + UpdatedAt *time.Time `json:"updatedAt"` +} + +// GetId returns FeatureModeRef.Id, and is useful for accessing the field via an interface. +func (v *FeatureModeRef) GetId() string { return v.Id } + +// GetOrganization returns FeatureModeRef.Organization, and is useful for accessing the field via an interface. +func (v *FeatureModeRef) GetOrganization() *OrganizationRef { return v.Organization } + +// GetScan returns FeatureModeRef.Scan, and is useful for accessing the field via an interface. +func (v *FeatureModeRef) GetScan() string { return v.Scan } + +// GetType returns FeatureModeRef.Type, and is useful for accessing the field via an interface. +func (v *FeatureModeRef) GetType() string { return v.Type } + +// GetEnabled returns FeatureModeRef.Enabled, and is useful for accessing the field via an interface. +func (v *FeatureModeRef) GetEnabled() *bool { return v.Enabled } + +// GetCategory returns FeatureModeRef.Category, and is useful for accessing the field via an interface. +func (v *FeatureModeRef) GetCategory() string { return v.Category } + +// GetCreatedAt returns FeatureModeRef.CreatedAt, and is useful for accessing the field via an interface. 
func (v *FeatureModeRef) GetCreatedAt() *time.Time { return v.CreatedAt }

// GetUpdatedAt returns FeatureModeRef.UpdatedAt, and is useful for accessing the field via an interface.
func (v *FeatureModeRef) GetUpdatedAt() *time.Time { return v.UpdatedAt }

// IntegratorRef describes a named integration (with optional credentials)
// configured for an organization.
type IntegratorRef struct {
	Id           string           `json:"id"`
	Organization *OrganizationRef `json:"organization,omitempty"`
	Name         string           `json:"name"`
	Type         string           `json:"type"`
	Category     string           `json:"category"`
	Credentials  *CredentialsRef  `json:"credentials,omitempty"`
	CreatedAt    *time.Time       `json:"createdAt"`
	UpdatedAt    *time.Time       `json:"updatedAt"`
}

// GetId returns IntegratorRef.Id, and is useful for accessing the field via an interface.
func (v *IntegratorRef) GetId() string { return v.Id }

// GetOrganization returns IntegratorRef.Organization, and is useful for accessing the field via an interface.
func (v *IntegratorRef) GetOrganization() *OrganizationRef { return v.Organization }

// GetName returns IntegratorRef.Name, and is useful for accessing the field via an interface.
func (v *IntegratorRef) GetName() string { return v.Name }

// GetType returns IntegratorRef.Type, and is useful for accessing the field via an interface.
func (v *IntegratorRef) GetType() string { return v.Type }

// GetCategory returns IntegratorRef.Category, and is useful for accessing the field via an interface.
func (v *IntegratorRef) GetCategory() string { return v.Category }

// GetCredentials returns IntegratorRef.Credentials, and is useful for accessing the field via an interface.
func (v *IntegratorRef) GetCredentials() *CredentialsRef { return v.Credentials }

// GetCreatedAt returns IntegratorRef.CreatedAt, and is useful for accessing the field via an interface.
func (v *IntegratorRef) GetCreatedAt() *time.Time { return v.CreatedAt }

// GetUpdatedAt returns IntegratorRef.UpdatedAt, and is useful for accessing the field via an interface.
func (v *IntegratorRef) GetUpdatedAt() *time.Time { return v.UpdatedAt }

// KeyValueRef is a simple id'd name/value pair.
type KeyValueRef struct {
	Id    string `json:"id"`
	Name  string `json:"name"`
	Value string `json:"value"`
}

// GetId returns KeyValueRef.Id, and is useful for accessing the field via an interface.
func (v *KeyValueRef) GetId() string { return v.Id }

// GetName returns KeyValueRef.Name, and is useful for accessing the field via an interface.
func (v *KeyValueRef) GetName() string { return v.Name }

// GetValue returns KeyValueRef.Value, and is useful for accessing the field via an interface.
func (v *KeyValueRef) GetValue() string { return v.Value }

// OrganizationRef aggregates everything owned by an organization: roles, teams,
// environments, policies, policy enforcements, integrators and feature modes.
type OrganizationRef struct {
	// id is randomly assigned
	Id                 string                  `json:"id"`
	Name               string                  `json:"name"`
	Roles              []*RoleRef              `json:"roles,omitempty"`
	Teams              []*TeamRef              `json:"teams,omitempty"`
	Environments       []*DeploymentTargetRef  `json:"environments,omitempty"`
	Policies           []*PolicyDefinitionRef  `json:"policies,omitempty"`
	PolicyEnforcements []*PolicyEnforcementRef `json:"policyEnforcements,omitempty"`
	Integrators        []*IntegratorRef        `json:"integrators,omitempty"`
	FeatureModes       []*FeatureModeRef       `json:"featureModes,omitempty"`
}

// GetId returns OrganizationRef.Id, and is useful for accessing the field via an interface.
func (v *OrganizationRef) GetId() string { return v.Id }

// GetName returns OrganizationRef.Name, and is useful for accessing the field via an interface.
func (v *OrganizationRef) GetName() string { return v.Name }

// GetRoles returns OrganizationRef.Roles, and is useful for accessing the field via an interface.
func (v *OrganizationRef) GetRoles() []*RoleRef { return v.Roles }

// GetTeams returns OrganizationRef.Teams, and is useful for accessing the field via an interface.
func (v *OrganizationRef) GetTeams() []*TeamRef { return v.Teams }

// GetEnvironments returns OrganizationRef.Environments, and is useful for accessing the field via an interface.
func (v *OrganizationRef) GetEnvironments() []*DeploymentTargetRef { return v.Environments }

// GetPolicies returns OrganizationRef.Policies, and is useful for accessing the field via an interface.
func (v *OrganizationRef) GetPolicies() []*PolicyDefinitionRef { return v.Policies }

// GetPolicyEnforcements returns OrganizationRef.PolicyEnforcements, and is useful for accessing the field via an interface.
func (v *OrganizationRef) GetPolicyEnforcements() []*PolicyEnforcementRef {
	return v.PolicyEnforcements
}

// GetIntegrators returns OrganizationRef.Integrators, and is useful for accessing the field via an interface.
func (v *OrganizationRef) GetIntegrators() []*IntegratorRef { return v.Integrators }

// GetFeatureModes returns OrganizationRef.FeatureModes, and is useful for accessing the field via an interface.
func (v *OrganizationRef) GetFeatureModes() []*FeatureModeRef { return v.FeatureModes }

// PolicyDefinitionRef is a policy definition (script, variables, condition) owned
// by exactly one of an organization, a team or an application.
type PolicyDefinitionRef struct {
	Id               string           `json:"id"`
	OwnerOrg         *OrganizationRef `json:"ownerOrg,omitempty"`
	OwnerTeam        *TeamRef         `json:"ownerTeam,omitempty"`
	OwnerApplication *ApplicationRef  `json:"ownerApplication,omitempty"`
	CreatedAt        *time.Time       `json:"createdAt"`
	UpdatedAt        *time.Time       `json:"updatedAt"`
	PolicyName       string           `json:"policyName"`
	Category         string           `json:"category"`
	Stage            string           `json:"stage"`
	Description      string           `json:"description"`
	ScheduledPolicy  *bool            `json:"scheduledPolicy"`
	Script           string           `json:"script"`
	Variables        string           `json:"variables"`
	ConditionName    string           `json:"conditionName"`
	Suggestion       string           `json:"suggestion"`
}

// GetId returns PolicyDefinitionRef.Id, and is useful for accessing the field via an interface.
func (v *PolicyDefinitionRef) GetId() string { return v.Id }

// GetOwnerOrg returns PolicyDefinitionRef.OwnerOrg, and is useful for accessing the field via an interface.
func (v *PolicyDefinitionRef) GetOwnerOrg() *OrganizationRef { return v.OwnerOrg }

// GetOwnerTeam returns PolicyDefinitionRef.OwnerTeam, and is useful for accessing the field via an interface.
func (v *PolicyDefinitionRef) GetOwnerTeam() *TeamRef { return v.OwnerTeam }

// GetOwnerApplication returns PolicyDefinitionRef.OwnerApplication, and is useful for accessing the field via an interface.
func (v *PolicyDefinitionRef) GetOwnerApplication() *ApplicationRef { return v.OwnerApplication }

// GetCreatedAt returns PolicyDefinitionRef.CreatedAt, and is useful for accessing the field via an interface.
func (v *PolicyDefinitionRef) GetCreatedAt() *time.Time { return v.CreatedAt }

// GetUpdatedAt returns PolicyDefinitionRef.UpdatedAt, and is useful for accessing the field via an interface.
func (v *PolicyDefinitionRef) GetUpdatedAt() *time.Time { return v.UpdatedAt }

// GetPolicyName returns PolicyDefinitionRef.PolicyName, and is useful for accessing the field via an interface.
func (v *PolicyDefinitionRef) GetPolicyName() string { return v.PolicyName }

// GetCategory returns PolicyDefinitionRef.Category, and is useful for accessing the field via an interface.
func (v *PolicyDefinitionRef) GetCategory() string { return v.Category }

// GetStage returns PolicyDefinitionRef.Stage, and is useful for accessing the field via an interface.
func (v *PolicyDefinitionRef) GetStage() string { return v.Stage }

// GetDescription returns PolicyDefinitionRef.Description, and is useful for accessing the field via an interface.
func (v *PolicyDefinitionRef) GetDescription() string { return v.Description }

// GetScheduledPolicy returns PolicyDefinitionRef.ScheduledPolicy, and is useful for accessing the field via an interface.
func (v *PolicyDefinitionRef) GetScheduledPolicy() *bool { return v.ScheduledPolicy }

// GetScript returns PolicyDefinitionRef.Script, and is useful for accessing the field via an interface.
func (v *PolicyDefinitionRef) GetScript() string { return v.Script }

// GetVariables returns PolicyDefinitionRef.Variables, and is useful for accessing the field via an interface.
func (v *PolicyDefinitionRef) GetVariables() string { return v.Variables }

// GetConditionName returns PolicyDefinitionRef.ConditionName, and is useful for accessing the field via an interface.
func (v *PolicyDefinitionRef) GetConditionName() string { return v.ConditionName }

// GetSuggestion returns PolicyDefinitionRef.Suggestion, and is useful for accessing the field via an interface.
func (v *PolicyDefinitionRef) GetSuggestion() string { return v.Suggestion }

// PolicyEnforcementRef binds a PolicyDefinitionRef to the org/team/application
// it is enforced on, with severity, action and scoping environments/tags.
type PolicyEnforcementRef struct {
	Id                  *string              `json:"id"`
	Policy              *PolicyDefinitionRef `json:"policy,omitempty"`
	EnforcedOrg         *OrganizationRef     `json:"enforcedOrg,omitempty"`
	EnforcedTeam        *TeamRef             `json:"enforcedTeam,omitempty"`
	EnforcedApplication *ApplicationRef      `json:"enforcedApplication,omitempty"`
	Status              *bool                `json:"status"`
	ForceApply          *bool                `json:"forceApply"`
	Severity            Severity             `json:"severity"`
	DatasourceTool      string               `json:"datasourceTool"`
	Action              string               `json:"action"`
	ConditionValue      string               `json:"conditionValue"`
	Environments        []*EnvironmentRef    `json:"environments,omitempty"`
	Tags                []*TagRef            `json:"tags,omitempty"`
	CreatedAt           *time.Time           `json:"createdAt"`
	UpdatedAt           *time.Time           `json:"updatedAt"`
}

// GetId returns PolicyEnforcementRef.Id, and is useful for accessing the field via an interface.
func (v *PolicyEnforcementRef) GetId() *string { return v.Id }

// GetPolicy returns PolicyEnforcementRef.Policy, and is useful for accessing the field via an interface.
func (v *PolicyEnforcementRef) GetPolicy() *PolicyDefinitionRef { return v.Policy }

// GetEnforcedOrg returns PolicyEnforcementRef.EnforcedOrg, and is useful for accessing the field via an interface.
func (v *PolicyEnforcementRef) GetEnforcedOrg() *OrganizationRef { return v.EnforcedOrg }

// GetEnforcedTeam returns PolicyEnforcementRef.EnforcedTeam, and is useful for accessing the field via an interface.
func (v *PolicyEnforcementRef) GetEnforcedTeam() *TeamRef { return v.EnforcedTeam }

// GetEnforcedApplication returns PolicyEnforcementRef.EnforcedApplication, and is useful for accessing the field via an interface.
func (v *PolicyEnforcementRef) GetEnforcedApplication() *ApplicationRef { return v.EnforcedApplication }

// GetStatus returns PolicyEnforcementRef.Status, and is useful for accessing the field via an interface.
func (v *PolicyEnforcementRef) GetStatus() *bool { return v.Status }

// GetForceApply returns PolicyEnforcementRef.ForceApply, and is useful for accessing the field via an interface.
func (v *PolicyEnforcementRef) GetForceApply() *bool { return v.ForceApply }

// GetSeverity returns PolicyEnforcementRef.Severity, and is useful for accessing the field via an interface.
func (v *PolicyEnforcementRef) GetSeverity() Severity { return v.Severity }

// GetDatasourceTool returns PolicyEnforcementRef.DatasourceTool, and is useful for accessing the field via an interface.
func (v *PolicyEnforcementRef) GetDatasourceTool() string { return v.DatasourceTool }

// GetAction returns PolicyEnforcementRef.Action, and is useful for accessing the field via an interface.
func (v *PolicyEnforcementRef) GetAction() string { return v.Action }

// GetConditionValue returns PolicyEnforcementRef.ConditionValue, and is useful for accessing the field via an interface.
func (v *PolicyEnforcementRef) GetConditionValue() string { return v.ConditionValue }

// GetEnvironments returns PolicyEnforcementRef.Environments, and is useful for accessing the field via an interface.
func (v *PolicyEnforcementRef) GetEnvironments() []*EnvironmentRef { return v.Environments }

// GetTags returns PolicyEnforcementRef.Tags, and is useful for accessing the field via an interface.
func (v *PolicyEnforcementRef) GetTags() []*TagRef { return v.Tags }

// GetCreatedAt returns PolicyEnforcementRef.CreatedAt, and is useful for accessing the field via an interface.
func (v *PolicyEnforcementRef) GetCreatedAt() *time.Time { return v.CreatedAt }

// GetUpdatedAt returns PolicyEnforcementRef.UpdatedAt, and is useful for accessing the field via an interface.
func (v *PolicyEnforcementRef) GetUpdatedAt() *time.Time { return v.UpdatedAt }

// RiskStatus tells us what risk a current application instance or a deployment is at.
type RiskStatus string

const (
	RiskStatusLowrisk        RiskStatus = "lowrisk"
	RiskStatusMediumrisk     RiskStatus = "mediumrisk"
	RiskStatusHighrisk       RiskStatus = "highrisk"
	RiskStatusApocalypserisk RiskStatus = "apocalypserisk"
	RiskStatusScanning       RiskStatus = "scanning"
)

// RolePermission enumerates the access levels a RoleRef can grant.
type RolePermission string

const (
	RolePermissionAdmin RolePermission = "admin"
	RolePermissionWrite RolePermission = "write"
	RolePermissionRead  RolePermission = "read"
)

// RoleRef grants a permission level to a group.
type RoleRef struct {
	// id is randomly assigned
	Id string `json:"id"`
	// group should be a URI format that includes a scope or realm
	Group      string         `json:"group"`
	Permission RolePermission `json:"permission"`
}

// GetId returns RoleRef.Id, and is useful for accessing the field via an interface.
func (v *RoleRef) GetId() string { return v.Id }

// GetGroup returns RoleRef.Group, and is useful for accessing the field via an interface.
func (v *RoleRef) GetGroup() string { return v.Group }

// GetPermission returns RoleRef.Permission, and is useful for accessing the field via an interface.
func (v *RoleRef) GetPermission() RolePermission { return v.Permission }

// RunHistoryRef records one policy evaluation run against a deployment or an
// artifact scan.
// NOTE(review): most json tags here are PascalCase, unlike the camelCase tags
// used elsewhere in this file — presumably this mirrors the GraphQL schema;
// confirm before changing.
type RunHistoryRef struct {
	Id                    *string                   `json:"id"`
	PolicyId              string                    `json:"policyId"`
	ApplicationDeployment *ApplicationDeploymentRef `json:"applicationDeployment,omitempty"`
	ArtifactScan          *ArtifactScanDataRef      `json:"artifactScan,omitempty"`
	PolicyName            string                    `json:"PolicyName"`
	Stage                 string                    `json:"Stage"`
	Artifact              string                    `json:"Artifact"`
	ArtifactTag           string                    `json:"ArtifactTag"`
	ArtifactSha           string                    `json:"ArtifactSha"`
	ArtifactNameTag       string                    `json:"ArtifactNameTag"`
	DatasourceTool        string                    `json:"DatasourceTool"`
	CreatedAt             *time.Time                `json:"CreatedAt"`
	UpdatedAt             *time.Time                `json:"UpdatedAt"`
	DeployedAt            *time.Time                `json:"DeployedAt"`
	Hash                  string                    `json:"Hash"`
	Pass                  *bool                     `json:"Pass"`
	MetaData              string                    `json:"MetaData"`
	FileApi               string                    `json:"FileApi"`
	ScheduledPolicy       *bool                     `json:"scheduledPolicy"`
	PolicyEnforcements    *PolicyEnforcementRef     `json:"policyEnforcements,omitempty"`
	SecurityIssue         *SecurityIssueRef         `json:"securityIssue,omitempty"`
}

// GetId returns RunHistoryRef.Id, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetId() *string { return v.Id }

// GetPolicyId returns RunHistoryRef.PolicyId, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetPolicyId() string { return v.PolicyId }

// GetApplicationDeployment returns RunHistoryRef.ApplicationDeployment, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetApplicationDeployment() *ApplicationDeploymentRef {
	return v.ApplicationDeployment
}

// GetArtifactScan returns RunHistoryRef.ArtifactScan, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetArtifactScan() *ArtifactScanDataRef { return v.ArtifactScan }

// GetPolicyName returns RunHistoryRef.PolicyName, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetPolicyName() string { return v.PolicyName }

// GetStage returns RunHistoryRef.Stage, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetStage() string { return v.Stage }

// GetArtifact returns RunHistoryRef.Artifact, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetArtifact() string { return v.Artifact }

// GetArtifactTag returns RunHistoryRef.ArtifactTag, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetArtifactTag() string { return v.ArtifactTag }

// GetArtifactSha returns RunHistoryRef.ArtifactSha, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetArtifactSha() string { return v.ArtifactSha }

// GetArtifactNameTag returns RunHistoryRef.ArtifactNameTag, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetArtifactNameTag() string { return v.ArtifactNameTag }

// GetDatasourceTool returns RunHistoryRef.DatasourceTool, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetDatasourceTool() string { return v.DatasourceTool }

// GetCreatedAt returns RunHistoryRef.CreatedAt, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetCreatedAt() *time.Time { return v.CreatedAt }

// GetUpdatedAt returns RunHistoryRef.UpdatedAt, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetUpdatedAt() *time.Time { return v.UpdatedAt }

// GetDeployedAt returns RunHistoryRef.DeployedAt, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetDeployedAt() *time.Time { return v.DeployedAt }

// GetHash returns RunHistoryRef.Hash, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetHash() string { return v.Hash }

// GetPass returns RunHistoryRef.Pass, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetPass() *bool { return v.Pass }

// GetMetaData returns RunHistoryRef.MetaData, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetMetaData() string { return v.MetaData }

// GetFileApi returns RunHistoryRef.FileApi, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetFileApi() string { return v.FileApi }

// GetScheduledPolicy returns RunHistoryRef.ScheduledPolicy, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetScheduledPolicy() *bool { return v.ScheduledPolicy }

// GetPolicyEnforcements returns RunHistoryRef.PolicyEnforcements, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetPolicyEnforcements() *PolicyEnforcementRef { return v.PolicyEnforcements }

// GetSecurityIssue returns RunHistoryRef.SecurityIssue, and is useful for accessing the field via an interface.
func (v *RunHistoryRef) GetSecurityIssue() *SecurityIssueRef { return v.SecurityIssue }

// SecurityIssueRef is an alert raised by policy runs; Affects links back to the
// RunHistoryRef entries it applies to.
// NOTE(review): json tags are PascalCase here (cf. RunHistoryRef) — presumably
// schema-driven; confirm before changing.
type SecurityIssueRef struct {
	Id           *string          `json:"id"`
	AlertTitle   string           `json:"AlertTitle"`
	AlertMessage string           `json:"AlertMessage"`
	Suggestions  string           `json:"Suggestions"`
	Severity     Severity         `json:"Severity"`
	CreatedAt    *time.Time       `json:"CreatedAt"`
	UpdatedAt    *time.Time       `json:"UpdatedAt"`
	Action       string           `json:"Action"`
	JiraUrl      string           `json:"JiraUrl"`
	Status       string           `json:"Status"`
	Reason       string           `json:"Reason"`
	Error        string           `json:"Error"`
	Affects      []*RunHistoryRef `json:"Affects,omitempty"`
}

// GetId returns SecurityIssueRef.Id, and is useful for accessing the field via an interface.
func (v *SecurityIssueRef) GetId() *string { return v.Id }

// GetAlertTitle returns SecurityIssueRef.AlertTitle, and is useful for accessing the field via an interface.
func (v *SecurityIssueRef) GetAlertTitle() string { return v.AlertTitle }

// GetAlertMessage returns SecurityIssueRef.AlertMessage, and is useful for accessing the field via an interface.
func (v *SecurityIssueRef) GetAlertMessage() string { return v.AlertMessage }

// GetSuggestions returns SecurityIssueRef.Suggestions, and is useful for accessing the field via an interface.
func (v *SecurityIssueRef) GetSuggestions() string { return v.Suggestions }

// GetSeverity returns SecurityIssueRef.Severity, and is useful for accessing the field via an interface.
func (v *SecurityIssueRef) GetSeverity() Severity { return v.Severity }

// GetCreatedAt returns SecurityIssueRef.CreatedAt, and is useful for accessing the field via an interface.
func (v *SecurityIssueRef) GetCreatedAt() *time.Time { return v.CreatedAt }

// GetUpdatedAt returns SecurityIssueRef.UpdatedAt, and is useful for accessing the field via an interface.
func (v *SecurityIssueRef) GetUpdatedAt() *time.Time { return v.UpdatedAt }

// GetAction returns SecurityIssueRef.Action, and is useful for accessing the field via an interface.
func (v *SecurityIssueRef) GetAction() string { return v.Action }

// GetJiraUrl returns SecurityIssueRef.JiraUrl, and is useful for accessing the field via an interface.
func (v *SecurityIssueRef) GetJiraUrl() string { return v.JiraUrl }

// GetStatus returns SecurityIssueRef.Status, and is useful for accessing the field via an interface.
func (v *SecurityIssueRef) GetStatus() string { return v.Status }

// GetReason returns SecurityIssueRef.Reason, and is useful for accessing the field via an interface.
func (v *SecurityIssueRef) GetReason() string { return v.Reason }

// GetError returns SecurityIssueRef.Error, and is useful for accessing the field via an interface.
func (v *SecurityIssueRef) GetError() string { return v.Error }

// GetAffects returns SecurityIssueRef.Affects, and is useful for accessing the field via an interface.
func (v *SecurityIssueRef) GetAffects() []*RunHistoryRef { return v.Affects }

// Severity is the shared severity scale used by policy enforcements,
// security issues and vulnerability ratings.
type Severity string

const (
	SeverityCritical Severity = "critical"
	SeverityHigh     Severity = "high"
	SeverityMedium   Severity = "medium"
	SeverityLow      Severity = "low"
	SeverityInfo     Severity = "info"
	SeverityNone     Severity = "none"
	SeverityUnknown  Severity = "unknown"
)

// SourceCodeToolRef captures SCM provenance for a built artifact.
type SourceCodeToolRef struct {
	// id is randomly assigned
	Id        string     `json:"id"`
	CreatedAt *time.Time `json:"createdAt"`
	// scm is the scm tool github/gitlab etc
	Scm string `json:"scm"`
	// repository is the git remote repository
	Repository string `json:"repository"`
	// branch is the git branch on which the artifact was built
	Branch string `json:"branch"`
	// headCommit is the checkout out head commit
	HeadCommit string `json:"headCommit"`
	// diffCommits is a comma separated string of the commits between the previous built artifact and the current
	DiffCommits  string `json:"diffCommits"`
	LicenseName  string `json:"licenseName"`
	Visibility   string `json:"visibility"`
	WorkflowName string `json:"workflowName"`
	// parentRepo is populated in case the git repo is a fork
	ParentRepo string        `json:"parentRepo"`
	BuildTool  *BuildToolRef `json:"buildTool,omitempty"`
}

// GetId returns SourceCodeToolRef.Id, and is useful for accessing the field via an interface.
func (v *SourceCodeToolRef) GetId() string { return v.Id }

// GetCreatedAt returns SourceCodeToolRef.CreatedAt, and is useful for accessing the field via an interface.
func (v *SourceCodeToolRef) GetCreatedAt() *time.Time { return v.CreatedAt }

// GetScm returns SourceCodeToolRef.Scm, and is useful for accessing the field via an interface.
func (v *SourceCodeToolRef) GetScm() string { return v.Scm }

// GetRepository returns SourceCodeToolRef.Repository, and is useful for accessing the field via an interface.
func (v *SourceCodeToolRef) GetRepository() string { return v.Repository }

// GetBranch returns SourceCodeToolRef.Branch, and is useful for accessing the field via an interface.
func (v *SourceCodeToolRef) GetBranch() string { return v.Branch }

// GetHeadCommit returns SourceCodeToolRef.HeadCommit, and is useful for accessing the field via an interface.
func (v *SourceCodeToolRef) GetHeadCommit() string { return v.HeadCommit }

// GetDiffCommits returns SourceCodeToolRef.DiffCommits, and is useful for accessing the field via an interface.
func (v *SourceCodeToolRef) GetDiffCommits() string { return v.DiffCommits }

// GetLicenseName returns SourceCodeToolRef.LicenseName, and is useful for accessing the field via an interface.
func (v *SourceCodeToolRef) GetLicenseName() string { return v.LicenseName }

// GetVisibility returns SourceCodeToolRef.Visibility, and is useful for accessing the field via an interface.
func (v *SourceCodeToolRef) GetVisibility() string { return v.Visibility }

// GetWorkflowName returns SourceCodeToolRef.WorkflowName, and is useful for accessing the field via an interface.
func (v *SourceCodeToolRef) GetWorkflowName() string { return v.WorkflowName }

// GetParentRepo returns SourceCodeToolRef.ParentRepo, and is useful for accessing the field via an interface.
func (v *SourceCodeToolRef) GetParentRepo() string { return v.ParentRepo }

// GetBuildTool returns SourceCodeToolRef.BuildTool, and is useful for accessing the field via an interface.
func (v *SourceCodeToolRef) GetBuildTool() *BuildToolRef { return v.BuildTool }

// TagRef is a named tag (with description and audit fields) that policy
// enforcements can be scoped to.
type TagRef struct {
	Id             string                  `json:"id"`
	TagName        string                  `json:"tagName"`
	TagValue       string                  `json:"tagValue"`
	TagDescription string                  `json:"tagDescription"`
	CreatedBy      string                  `json:"createdBy"`
	CreatedAt      *time.Time              `json:"createdAt"`
	UpdatedAt      *time.Time              `json:"updatedAt"`
	Policies       []*PolicyEnforcementRef `json:"policies,omitempty"`
}

// GetId returns TagRef.Id, and is useful for accessing the field via an interface.
func (v *TagRef) GetId() string { return v.Id }

// GetTagName returns TagRef.TagName, and is useful for accessing the field via an interface.
func (v *TagRef) GetTagName() string { return v.TagName }

// GetTagValue returns TagRef.TagValue, and is useful for accessing the field via an interface.
func (v *TagRef) GetTagValue() string { return v.TagValue }

// GetTagDescription returns TagRef.TagDescription, and is useful for accessing the field via an interface.
func (v *TagRef) GetTagDescription() string { return v.TagDescription }

// GetCreatedBy returns TagRef.CreatedBy, and is useful for accessing the field via an interface.
func (v *TagRef) GetCreatedBy() string { return v.CreatedBy }

// GetCreatedAt returns TagRef.CreatedAt, and is useful for accessing the field via an interface.
func (v *TagRef) GetCreatedAt() *time.Time { return v.CreatedAt }

// GetUpdatedAt returns TagRef.UpdatedAt, and is useful for accessing the field via an interface.
func (v *TagRef) GetUpdatedAt() *time.Time { return v.UpdatedAt }

// GetPolicies returns TagRef.Policies, and is useful for accessing the field via an interface.
func (v *TagRef) GetPolicies() []*PolicyEnforcementRef { return v.Policies }

// TeamRef is a team within an organization, with its roles, applications,
// labels and policies.
type TeamRef struct {
	// id is randomly assigned
	Id                 string                  `json:"id"`
	Name               string                  `json:"name"`
	Roles              []*RoleRef              `json:"roles,omitempty"`
	Organization       *OrganizationRef        `json:"organization,omitempty"`
	Applications       []*ApplicationRef       `json:"applications,omitempty"`
	Labels             []*KeyValueRef          `json:"labels,omitempty"`
	Policies           []*PolicyDefinitionRef  `json:"policies,omitempty"`
	PolicyEnforcements []*PolicyEnforcementRef `json:"policyEnforcements,omitempty"`
}

// GetId returns TeamRef.Id, and is useful for accessing the field via an interface.
func (v *TeamRef) GetId() string { return v.Id }

// GetName returns TeamRef.Name, and is useful for accessing the field via an interface.
func (v *TeamRef) GetName() string { return v.Name }

// GetRoles returns TeamRef.Roles, and is useful for accessing the field via an interface.
func (v *TeamRef) GetRoles() []*RoleRef { return v.Roles }

// GetOrganization returns TeamRef.Organization, and is useful for accessing the field via an interface.
func (v *TeamRef) GetOrganization() *OrganizationRef { return v.Organization }

// GetApplications returns TeamRef.Applications, and is useful for accessing the field via an interface.
func (v *TeamRef) GetApplications() []*ApplicationRef { return v.Applications }

// GetLabels returns TeamRef.Labels, and is useful for accessing the field via an interface.
func (v *TeamRef) GetLabels() []*KeyValueRef { return v.Labels }

// GetPolicies returns TeamRef.Policies, and is useful for accessing the field via an interface.
func (v *TeamRef) GetPolicies() []*PolicyDefinitionRef { return v.Policies }

// GetPolicyEnforcements returns TeamRef.PolicyEnforcements, and is useful for accessing the field via an interface.
func (v *TeamRef) GetPolicyEnforcements() []*PolicyEnforcementRef { return v.PolicyEnforcements }

// ToolsUsedRef lists the tools used at each pipeline stage (source, build,
// artifact, deploy, sbom) plus any miscellaneous tools.
type ToolsUsedRef struct {
	Id       *string  `json:"id"`
	Source   string   `json:"source"`
	Build    string   `json:"build"`
	Artifact string   `json:"artifact"`
	Deploy   string   `json:"deploy"`
	Sbom     string   `json:"sbom"`
	Misc     []string `json:"misc"`
}

// GetId returns ToolsUsedRef.Id, and is useful for accessing the field via an interface.
func (v *ToolsUsedRef) GetId() *string { return v.Id }

// GetSource returns ToolsUsedRef.Source, and is useful for accessing the field via an interface.
func (v *ToolsUsedRef) GetSource() string { return v.Source }

// GetBuild returns ToolsUsedRef.Build, and is useful for accessing the field via an interface.
func (v *ToolsUsedRef) GetBuild() string { return v.Build }

// GetArtifact returns ToolsUsedRef.Artifact, and is useful for accessing the field via an interface.
func (v *ToolsUsedRef) GetArtifact() string { return v.Artifact }

// GetDeploy returns ToolsUsedRef.Deploy, and is useful for accessing the field via an interface.
func (v *ToolsUsedRef) GetDeploy() string { return v.Deploy }

// GetSbom returns ToolsUsedRef.Sbom, and is useful for accessing the field via an interface.
func (v *ToolsUsedRef) GetSbom() string { return v.Sbom }

// GetMisc returns ToolsUsedRef.Misc, and is useful for accessing the field via an interface.
func (v *ToolsUsedRef) GetMisc() []string { return v.Misc }

// VulnerabilityRef is a vulnerability record with its CWEs, scoring fields
// (cvss, epss, cisa_kev) and the components it affects.
type VulnerabilityRef struct {
	Id             string          `json:"id"`
	Parent         string          `json:"parent"`
	Ratings        Severity        `json:"ratings"`
	Cwes           []*CWERef       `json:"cwes,omitempty"`
	Summary        string          `json:"summary"`
	Detail         string          `json:"detail"`
	Recommendation string          `json:"recommendation"`
	Published      *time.Time      `json:"published"`
	Modified       *time.Time      `json:"modified"`
	CreatedAt      *time.Time      `json:"createdAt"`
	Cvss           float64         `json:"cvss"`
	Priority       string          `json:"priority"`
	Epss           float64         `json:"epss"`
	// NOTE(review): underscore name mirrors the schema field "cisa_kev" (generated code — do not rename).
	Cisa_kev string          `json:"cisa_kev"`
	Affects  []*ComponentRef `json:"affects,omitempty"`
}

// GetId returns VulnerabilityRef.Id, and is useful for accessing the field via an interface.
func (v *VulnerabilityRef) GetId() string { return v.Id }

// GetParent returns VulnerabilityRef.Parent, and is useful for accessing the field via an interface.
func (v *VulnerabilityRef) GetParent() string { return v.Parent }

// GetRatings returns VulnerabilityRef.Ratings, and is useful for accessing the field via an interface.
func (v *VulnerabilityRef) GetRatings() Severity { return v.Ratings }

// GetCwes returns VulnerabilityRef.Cwes, and is useful for accessing the field via an interface.
func (v *VulnerabilityRef) GetCwes() []*CWERef { return v.Cwes }

// GetSummary returns VulnerabilityRef.Summary, and is useful for accessing the field via an interface.
func (v *VulnerabilityRef) GetSummary() string { return v.Summary }

// GetDetail returns VulnerabilityRef.Detail, and is useful for accessing the field via an interface.
func (v *VulnerabilityRef) GetDetail() string { return v.Detail }

// GetRecommendation returns VulnerabilityRef.Recommendation, and is useful for accessing the field via an interface.
func (v *VulnerabilityRef) GetRecommendation() string { return v.Recommendation }

// GetPublished returns VulnerabilityRef.Published, and is useful for accessing the field via an interface.
func (v *VulnerabilityRef) GetPublished() *time.Time { return v.Published }

// GetModified returns VulnerabilityRef.Modified, and is useful for accessing the field via an interface.
func (v *VulnerabilityRef) GetModified() *time.Time { return v.Modified }

// GetCreatedAt returns VulnerabilityRef.CreatedAt, and is useful for accessing the field via an interface.
func (v *VulnerabilityRef) GetCreatedAt() *time.Time { return v.CreatedAt }

// GetCvss returns VulnerabilityRef.Cvss, and is useful for accessing the field via an interface.
func (v *VulnerabilityRef) GetCvss() float64 { return v.Cvss }

// GetPriority returns VulnerabilityRef.Priority, and is useful for accessing the field via an interface.
func (v *VulnerabilityRef) GetPriority() string { return v.Priority }

// GetEpss returns VulnerabilityRef.Epss, and is useful for accessing the field via an interface.
func (v *VulnerabilityRef) GetEpss() float64 { return v.Epss }

// GetCisa_kev returns VulnerabilityRef.Cisa_kev, and is useful for accessing the field via an interface.
func (v *VulnerabilityRef) GetCisa_kev() string { return v.Cisa_kev }

// GetAffects returns VulnerabilityRef.Affects, and is useful for accessing the field via an interface.
func (v *VulnerabilityRef) GetAffects() []*ComponentRef { return v.Affects }

// __addPolicyDefinitionInput is used internally by genqlient
type __addPolicyDefinitionInput struct {
	Input []*AddPolicyDefinitionInput `json:"input,omitempty"`
}

// GetInput returns __addPolicyDefinitionInput.Input, and is useful for accessing the field via an interface.
func (v *__addPolicyDefinitionInput) GetInput() []*AddPolicyDefinitionInput { return v.Input }

// __addPolicyEnforcementInput is used internally by genqlient
type __addPolicyEnforcementInput struct {
	Input []*AddPolicyEnforcementInput `json:"input,omitempty"`
}

// GetInput returns __addPolicyEnforcementInput.Input, and is useful for accessing the field via an interface.
+func (v *__addPolicyEnforcementInput) GetInput() []*AddPolicyEnforcementInput { return v.Input } + +// __addTagInput is used internally by genqlient +type __addTagInput struct { + Tags []*AddTagInput `json:"tags,omitempty"` +} + +// GetTags returns __addTagInput.Tags, and is useful for accessing the field via an interface. +func (v *__addTagInput) GetTags() []*AddTagInput { return v.Tags } + +// __assignTagsToPolicyInput is used internally by genqlient +type __assignTagsToPolicyInput struct { + PolicyEnforcementId *string `json:"policyEnforcementId"` + Tags *TagRef `json:"tags,omitempty"` + UpdatedAt *time.Time `json:"updatedAt"` +} + +// GetPolicyEnforcementId returns __assignTagsToPolicyInput.PolicyEnforcementId, and is useful for accessing the field via an interface. +func (v *__assignTagsToPolicyInput) GetPolicyEnforcementId() *string { return v.PolicyEnforcementId } + +// GetTags returns __assignTagsToPolicyInput.Tags, and is useful for accessing the field via an interface. +func (v *__assignTagsToPolicyInput) GetTags() *TagRef { return v.Tags } + +// GetUpdatedAt returns __assignTagsToPolicyInput.UpdatedAt, and is useful for accessing the field via an interface. +func (v *__assignTagsToPolicyInput) GetUpdatedAt() *time.Time { return v.UpdatedAt } + +// __checkIfExactPolicyDefinitionExistsInput is used internally by genqlient +type __checkIfExactPolicyDefinitionExistsInput struct { + PolicyName string `json:"policyName"` + Category string `json:"category"` + Stage string `json:"stage"` + Description string `json:"description"` + ScheduledPolicy *bool `json:"scheduledPolicy"` + Script string `json:"script"` + Variables string `json:"variables"` + ConditionName string `json:"conditionName"` + Suggestion string `json:"suggestion"` + OrgId string `json:"orgId"` +} + +// GetPolicyName returns __checkIfExactPolicyDefinitionExistsInput.PolicyName, and is useful for accessing the field via an interface. 
+func (v *__checkIfExactPolicyDefinitionExistsInput) GetPolicyName() string { return v.PolicyName } + +// GetCategory returns __checkIfExactPolicyDefinitionExistsInput.Category, and is useful for accessing the field via an interface. +func (v *__checkIfExactPolicyDefinitionExistsInput) GetCategory() string { return v.Category } + +// GetStage returns __checkIfExactPolicyDefinitionExistsInput.Stage, and is useful for accessing the field via an interface. +func (v *__checkIfExactPolicyDefinitionExistsInput) GetStage() string { return v.Stage } + +// GetDescription returns __checkIfExactPolicyDefinitionExistsInput.Description, and is useful for accessing the field via an interface. +func (v *__checkIfExactPolicyDefinitionExistsInput) GetDescription() string { return v.Description } + +// GetScheduledPolicy returns __checkIfExactPolicyDefinitionExistsInput.ScheduledPolicy, and is useful for accessing the field via an interface. +func (v *__checkIfExactPolicyDefinitionExistsInput) GetScheduledPolicy() *bool { + return v.ScheduledPolicy +} + +// GetScript returns __checkIfExactPolicyDefinitionExistsInput.Script, and is useful for accessing the field via an interface. +func (v *__checkIfExactPolicyDefinitionExistsInput) GetScript() string { return v.Script } + +// GetVariables returns __checkIfExactPolicyDefinitionExistsInput.Variables, and is useful for accessing the field via an interface. +func (v *__checkIfExactPolicyDefinitionExistsInput) GetVariables() string { return v.Variables } + +// GetConditionName returns __checkIfExactPolicyDefinitionExistsInput.ConditionName, and is useful for accessing the field via an interface. +func (v *__checkIfExactPolicyDefinitionExistsInput) GetConditionName() string { return v.ConditionName } + +// GetSuggestion returns __checkIfExactPolicyDefinitionExistsInput.Suggestion, and is useful for accessing the field via an interface. 
+func (v *__checkIfExactPolicyDefinitionExistsInput) GetSuggestion() string { return v.Suggestion } + +// GetOrgId returns __checkIfExactPolicyDefinitionExistsInput.OrgId, and is useful for accessing the field via an interface. +func (v *__checkIfExactPolicyDefinitionExistsInput) GetOrgId() string { return v.OrgId } + +// __checkIfPolicyEnforcementExistsInput is used internally by genqlient +type __checkIfPolicyEnforcementExistsInput struct { + DatasourceTool string `json:"datasourceTool"` + PolicyId string `json:"policyId"` +} + +// GetDatasourceTool returns __checkIfPolicyEnforcementExistsInput.DatasourceTool, and is useful for accessing the field via an interface. +func (v *__checkIfPolicyEnforcementExistsInput) GetDatasourceTool() string { return v.DatasourceTool } + +// GetPolicyId returns __checkIfPolicyEnforcementExistsInput.PolicyId, and is useful for accessing the field via an interface. +func (v *__checkIfPolicyEnforcementExistsInput) GetPolicyId() string { return v.PolicyId } + +// __checkIfPolicyNameExistsInput is used internally by genqlient +type __checkIfPolicyNameExistsInput struct { + PolicyName string `json:"policyName"` + OrgId string `json:"orgId"` +} + +// GetPolicyName returns __checkIfPolicyNameExistsInput.PolicyName, and is useful for accessing the field via an interface. +func (v *__checkIfPolicyNameExistsInput) GetPolicyName() string { return v.PolicyName } + +// GetOrgId returns __checkIfPolicyNameExistsInput.OrgId, and is useful for accessing the field via an interface. +func (v *__checkIfPolicyNameExistsInput) GetOrgId() string { return v.OrgId } + +// __checkIfTagExistsInput is used internally by genqlient +type __checkIfTagExistsInput struct { + TagName string `json:"tagName"` + TagValue string `json:"tagValue"` + CreatedBy string `json:"createdBy"` +} + +// GetTagName returns __checkIfTagExistsInput.TagName, and is useful for accessing the field via an interface. 
+func (v *__checkIfTagExistsInput) GetTagName() string { return v.TagName } + +// GetTagValue returns __checkIfTagExistsInput.TagValue, and is useful for accessing the field via an interface. +func (v *__checkIfTagExistsInput) GetTagValue() string { return v.TagValue } + +// GetCreatedBy returns __checkIfTagExistsInput.CreatedBy, and is useful for accessing the field via an interface. +func (v *__checkIfTagExistsInput) GetCreatedBy() string { return v.CreatedBy } + +// __getLastPolicyIdInput is used internally by genqlient +type __getLastPolicyIdInput struct { + OrganizationId string `json:"organizationId"` +} + +// GetOrganizationId returns __getLastPolicyIdInput.OrganizationId, and is useful for accessing the field via an interface. +func (v *__getLastPolicyIdInput) GetOrganizationId() string { return v.OrganizationId } + +// __updatePolicyDefinitionInput is used internally by genqlient +type __updatePolicyDefinitionInput struct { + PolicyId string `json:"policyId"` + Category string `json:"category"` + Stage string `json:"stage"` + Description string `json:"description"` + ScheduledPolicy *bool `json:"scheduledPolicy"` + Script string `json:"script"` + Variables string `json:"variables"` + ConditionName string `json:"conditionName"` + Suggestion string `json:"suggestion"` + UpdatedAt *time.Time `json:"updatedAt"` +} + +// GetPolicyId returns __updatePolicyDefinitionInput.PolicyId, and is useful for accessing the field via an interface. +func (v *__updatePolicyDefinitionInput) GetPolicyId() string { return v.PolicyId } + +// GetCategory returns __updatePolicyDefinitionInput.Category, and is useful for accessing the field via an interface. +func (v *__updatePolicyDefinitionInput) GetCategory() string { return v.Category } + +// GetStage returns __updatePolicyDefinitionInput.Stage, and is useful for accessing the field via an interface. 
+func (v *__updatePolicyDefinitionInput) GetStage() string { return v.Stage } + +// GetDescription returns __updatePolicyDefinitionInput.Description, and is useful for accessing the field via an interface. +func (v *__updatePolicyDefinitionInput) GetDescription() string { return v.Description } + +// GetScheduledPolicy returns __updatePolicyDefinitionInput.ScheduledPolicy, and is useful for accessing the field via an interface. +func (v *__updatePolicyDefinitionInput) GetScheduledPolicy() *bool { return v.ScheduledPolicy } + +// GetScript returns __updatePolicyDefinitionInput.Script, and is useful for accessing the field via an interface. +func (v *__updatePolicyDefinitionInput) GetScript() string { return v.Script } + +// GetVariables returns __updatePolicyDefinitionInput.Variables, and is useful for accessing the field via an interface. +func (v *__updatePolicyDefinitionInput) GetVariables() string { return v.Variables } + +// GetConditionName returns __updatePolicyDefinitionInput.ConditionName, and is useful for accessing the field via an interface. +func (v *__updatePolicyDefinitionInput) GetConditionName() string { return v.ConditionName } + +// GetSuggestion returns __updatePolicyDefinitionInput.Suggestion, and is useful for accessing the field via an interface. +func (v *__updatePolicyDefinitionInput) GetSuggestion() string { return v.Suggestion } + +// GetUpdatedAt returns __updatePolicyDefinitionInput.UpdatedAt, and is useful for accessing the field via an interface. +func (v *__updatePolicyDefinitionInput) GetUpdatedAt() *time.Time { return v.UpdatedAt } + +// __updatePolicyEnforcementInput is used internally by genqlient +type __updatePolicyEnforcementInput struct { + ConditionValue string `json:"conditionValue"` + PolicyEnfId *string `json:"policyEnfId"` + UpdatedAt *time.Time `json:"updatedAt"` +} + +// GetConditionValue returns __updatePolicyEnforcementInput.ConditionValue, and is useful for accessing the field via an interface. 
+func (v *__updatePolicyEnforcementInput) GetConditionValue() string { return v.ConditionValue } + +// GetPolicyEnfId returns __updatePolicyEnforcementInput.PolicyEnfId, and is useful for accessing the field via an interface. +func (v *__updatePolicyEnforcementInput) GetPolicyEnfId() *string { return v.PolicyEnfId } + +// GetUpdatedAt returns __updatePolicyEnforcementInput.UpdatedAt, and is useful for accessing the field via an interface. +func (v *__updatePolicyEnforcementInput) GetUpdatedAt() *time.Time { return v.UpdatedAt } + +// addPolicyDefinitionAddPolicyDefinitionAddPolicyDefinitionPayload includes the requested fields of the GraphQL type AddPolicyDefinitionPayload. +type addPolicyDefinitionAddPolicyDefinitionAddPolicyDefinitionPayload struct { + NumUids *int `json:"numUids"` +} + +// GetNumUids returns addPolicyDefinitionAddPolicyDefinitionAddPolicyDefinitionPayload.NumUids, and is useful for accessing the field via an interface. +func (v *addPolicyDefinitionAddPolicyDefinitionAddPolicyDefinitionPayload) GetNumUids() *int { + return v.NumUids +} + +// addPolicyDefinitionResponse is returned by addPolicyDefinition on success. +type addPolicyDefinitionResponse struct { + AddPolicyDefinition *addPolicyDefinitionAddPolicyDefinitionAddPolicyDefinitionPayload `json:"addPolicyDefinition"` +} + +// GetAddPolicyDefinition returns addPolicyDefinitionResponse.AddPolicyDefinition, and is useful for accessing the field via an interface. +func (v *addPolicyDefinitionResponse) GetAddPolicyDefinition() *addPolicyDefinitionAddPolicyDefinitionAddPolicyDefinitionPayload { + return v.AddPolicyDefinition +} + +// addPolicyEnforcementAddPolicyEnforcementAddPolicyEnforcementPayload includes the requested fields of the GraphQL type AddPolicyEnforcementPayload. 
+type addPolicyEnforcementAddPolicyEnforcementAddPolicyEnforcementPayload struct { + NumUids *int `json:"numUids"` +} + +// GetNumUids returns addPolicyEnforcementAddPolicyEnforcementAddPolicyEnforcementPayload.NumUids, and is useful for accessing the field via an interface. +func (v *addPolicyEnforcementAddPolicyEnforcementAddPolicyEnforcementPayload) GetNumUids() *int { + return v.NumUids +} + +// addPolicyEnforcementResponse is returned by addPolicyEnforcement on success. +type addPolicyEnforcementResponse struct { + AddPolicyEnforcement *addPolicyEnforcementAddPolicyEnforcementAddPolicyEnforcementPayload `json:"addPolicyEnforcement"` +} + +// GetAddPolicyEnforcement returns addPolicyEnforcementResponse.AddPolicyEnforcement, and is useful for accessing the field via an interface. +func (v *addPolicyEnforcementResponse) GetAddPolicyEnforcement() *addPolicyEnforcementAddPolicyEnforcementAddPolicyEnforcementPayload { + return v.AddPolicyEnforcement +} + +// addTagAddTagAddTagPayload includes the requested fields of the GraphQL type AddTagPayload. +type addTagAddTagAddTagPayload struct { + NumUids *int `json:"numUids"` +} + +// GetNumUids returns addTagAddTagAddTagPayload.NumUids, and is useful for accessing the field via an interface. +func (v *addTagAddTagAddTagPayload) GetNumUids() *int { return v.NumUids } + +// addTagResponse is returned by addTag on success. +type addTagResponse struct { + AddTag *addTagAddTagAddTagPayload `json:"addTag"` +} + +// GetAddTag returns addTagResponse.AddTag, and is useful for accessing the field via an interface. +func (v *addTagResponse) GetAddTag() *addTagAddTagAddTagPayload { return v.AddTag } + +// assignTagsToPolicyResponse is returned by assignTagsToPolicy on success. 
+type assignTagsToPolicyResponse struct { + UpdatePolicyEnforcement *assignTagsToPolicyUpdatePolicyEnforcementUpdatePolicyEnforcementPayload `json:"updatePolicyEnforcement"` +} + +// GetUpdatePolicyEnforcement returns assignTagsToPolicyResponse.UpdatePolicyEnforcement, and is useful for accessing the field via an interface. +func (v *assignTagsToPolicyResponse) GetUpdatePolicyEnforcement() *assignTagsToPolicyUpdatePolicyEnforcementUpdatePolicyEnforcementPayload { + return v.UpdatePolicyEnforcement +} + +// assignTagsToPolicyUpdatePolicyEnforcementUpdatePolicyEnforcementPayload includes the requested fields of the GraphQL type UpdatePolicyEnforcementPayload. +type assignTagsToPolicyUpdatePolicyEnforcementUpdatePolicyEnforcementPayload struct { + NumUids *int `json:"numUids"` +} + +// GetNumUids returns assignTagsToPolicyUpdatePolicyEnforcementUpdatePolicyEnforcementPayload.NumUids, and is useful for accessing the field via an interface. +func (v *assignTagsToPolicyUpdatePolicyEnforcementUpdatePolicyEnforcementPayload) GetNumUids() *int { + return v.NumUids +} + +// checkIfExactPolicyDefinitionExistsQueryPolicyDefinition includes the requested fields of the GraphQL type PolicyDefinition. +type checkIfExactPolicyDefinitionExistsQueryPolicyDefinition struct { + Id string `json:"id"` + OwnerOrg *checkIfExactPolicyDefinitionExistsQueryPolicyDefinitionOwnerOrgOrganization `json:"ownerOrg"` +} + +// GetId returns checkIfExactPolicyDefinitionExistsQueryPolicyDefinition.Id, and is useful for accessing the field via an interface. +func (v *checkIfExactPolicyDefinitionExistsQueryPolicyDefinition) GetId() string { return v.Id } + +// GetOwnerOrg returns checkIfExactPolicyDefinitionExistsQueryPolicyDefinition.OwnerOrg, and is useful for accessing the field via an interface. 
+func (v *checkIfExactPolicyDefinitionExistsQueryPolicyDefinition) GetOwnerOrg() *checkIfExactPolicyDefinitionExistsQueryPolicyDefinitionOwnerOrgOrganization { + return v.OwnerOrg +} + +// checkIfExactPolicyDefinitionExistsQueryPolicyDefinitionOwnerOrgOrganization includes the requested fields of the GraphQL type Organization. +type checkIfExactPolicyDefinitionExistsQueryPolicyDefinitionOwnerOrgOrganization struct { + // id is randomly assigned + Id string `json:"id"` +} + +// GetId returns checkIfExactPolicyDefinitionExistsQueryPolicyDefinitionOwnerOrgOrganization.Id, and is useful for accessing the field via an interface. +func (v *checkIfExactPolicyDefinitionExistsQueryPolicyDefinitionOwnerOrgOrganization) GetId() string { + return v.Id +} + +// checkIfExactPolicyDefinitionExistsResponse is returned by checkIfExactPolicyDefinitionExists on success. +type checkIfExactPolicyDefinitionExistsResponse struct { + QueryPolicyDefinition []*checkIfExactPolicyDefinitionExistsQueryPolicyDefinition `json:"queryPolicyDefinition"` +} + +// GetQueryPolicyDefinition returns checkIfExactPolicyDefinitionExistsResponse.QueryPolicyDefinition, and is useful for accessing the field via an interface. +func (v *checkIfExactPolicyDefinitionExistsResponse) GetQueryPolicyDefinition() []*checkIfExactPolicyDefinitionExistsQueryPolicyDefinition { + return v.QueryPolicyDefinition +} + +// checkIfPolicyEnforcementExistsQueryPolicyEnforcement includes the requested fields of the GraphQL type PolicyEnforcement. +type checkIfPolicyEnforcementExistsQueryPolicyEnforcement struct { + Id *string `json:"id"` + ConditionValue string `json:"conditionValue"` + Policy *checkIfPolicyEnforcementExistsQueryPolicyEnforcementPolicyPolicyDefinition `json:"policy"` + Tags []*checkIfPolicyEnforcementExistsQueryPolicyEnforcementTagsTag `json:"tags"` +} + +// GetId returns checkIfPolicyEnforcementExistsQueryPolicyEnforcement.Id, and is useful for accessing the field via an interface. 
+func (v *checkIfPolicyEnforcementExistsQueryPolicyEnforcement) GetId() *string { return v.Id } + +// GetConditionValue returns checkIfPolicyEnforcementExistsQueryPolicyEnforcement.ConditionValue, and is useful for accessing the field via an interface. +func (v *checkIfPolicyEnforcementExistsQueryPolicyEnforcement) GetConditionValue() string { + return v.ConditionValue +} + +// GetPolicy returns checkIfPolicyEnforcementExistsQueryPolicyEnforcement.Policy, and is useful for accessing the field via an interface. +func (v *checkIfPolicyEnforcementExistsQueryPolicyEnforcement) GetPolicy() *checkIfPolicyEnforcementExistsQueryPolicyEnforcementPolicyPolicyDefinition { + return v.Policy +} + +// GetTags returns checkIfPolicyEnforcementExistsQueryPolicyEnforcement.Tags, and is useful for accessing the field via an interface. +func (v *checkIfPolicyEnforcementExistsQueryPolicyEnforcement) GetTags() []*checkIfPolicyEnforcementExistsQueryPolicyEnforcementTagsTag { + return v.Tags +} + +// checkIfPolicyEnforcementExistsQueryPolicyEnforcementPolicyPolicyDefinition includes the requested fields of the GraphQL type PolicyDefinition. +type checkIfPolicyEnforcementExistsQueryPolicyEnforcementPolicyPolicyDefinition struct { + Id string `json:"id"` +} + +// GetId returns checkIfPolicyEnforcementExistsQueryPolicyEnforcementPolicyPolicyDefinition.Id, and is useful for accessing the field via an interface. +func (v *checkIfPolicyEnforcementExistsQueryPolicyEnforcementPolicyPolicyDefinition) GetId() string { + return v.Id +} + +// checkIfPolicyEnforcementExistsQueryPolicyEnforcementTagsTag includes the requested fields of the GraphQL type Tag. +// The GraphQL type's documentation follows. +// +// Tag tells us about the tags that are linked to policies and other components. 
+type checkIfPolicyEnforcementExistsQueryPolicyEnforcementTagsTag struct { + Id string `json:"id"` +} + +// GetId returns checkIfPolicyEnforcementExistsQueryPolicyEnforcementTagsTag.Id, and is useful for accessing the field via an interface. +func (v *checkIfPolicyEnforcementExistsQueryPolicyEnforcementTagsTag) GetId() string { return v.Id } + +// checkIfPolicyEnforcementExistsResponse is returned by checkIfPolicyEnforcementExists on success. +type checkIfPolicyEnforcementExistsResponse struct { + QueryPolicyEnforcement []*checkIfPolicyEnforcementExistsQueryPolicyEnforcement `json:"queryPolicyEnforcement"` +} + +// GetQueryPolicyEnforcement returns checkIfPolicyEnforcementExistsResponse.QueryPolicyEnforcement, and is useful for accessing the field via an interface. +func (v *checkIfPolicyEnforcementExistsResponse) GetQueryPolicyEnforcement() []*checkIfPolicyEnforcementExistsQueryPolicyEnforcement { + return v.QueryPolicyEnforcement +} + +// checkIfPolicyNameExistsQueryPolicyDefinition includes the requested fields of the GraphQL type PolicyDefinition. +type checkIfPolicyNameExistsQueryPolicyDefinition struct { + Id string `json:"id"` + PolicyName string `json:"policyName"` + Stage string `json:"stage"` + Category string `json:"category"` + OwnerOrg *checkIfPolicyNameExistsQueryPolicyDefinitionOwnerOrgOrganization `json:"ownerOrg"` +} + +// GetId returns checkIfPolicyNameExistsQueryPolicyDefinition.Id, and is useful for accessing the field via an interface. +func (v *checkIfPolicyNameExistsQueryPolicyDefinition) GetId() string { return v.Id } + +// GetPolicyName returns checkIfPolicyNameExistsQueryPolicyDefinition.PolicyName, and is useful for accessing the field via an interface. +func (v *checkIfPolicyNameExistsQueryPolicyDefinition) GetPolicyName() string { return v.PolicyName } + +// GetStage returns checkIfPolicyNameExistsQueryPolicyDefinition.Stage, and is useful for accessing the field via an interface. 
+func (v *checkIfPolicyNameExistsQueryPolicyDefinition) GetStage() string { return v.Stage } + +// GetCategory returns checkIfPolicyNameExistsQueryPolicyDefinition.Category, and is useful for accessing the field via an interface. +func (v *checkIfPolicyNameExistsQueryPolicyDefinition) GetCategory() string { return v.Category } + +// GetOwnerOrg returns checkIfPolicyNameExistsQueryPolicyDefinition.OwnerOrg, and is useful for accessing the field via an interface. +func (v *checkIfPolicyNameExistsQueryPolicyDefinition) GetOwnerOrg() *checkIfPolicyNameExistsQueryPolicyDefinitionOwnerOrgOrganization { + return v.OwnerOrg +} + +// checkIfPolicyNameExistsQueryPolicyDefinitionOwnerOrgOrganization includes the requested fields of the GraphQL type Organization. +type checkIfPolicyNameExistsQueryPolicyDefinitionOwnerOrgOrganization struct { + // id is randomly assigned + Id string `json:"id"` +} + +// GetId returns checkIfPolicyNameExistsQueryPolicyDefinitionOwnerOrgOrganization.Id, and is useful for accessing the field via an interface. +func (v *checkIfPolicyNameExistsQueryPolicyDefinitionOwnerOrgOrganization) GetId() string { + return v.Id +} + +// checkIfPolicyNameExistsResponse is returned by checkIfPolicyNameExists on success. +type checkIfPolicyNameExistsResponse struct { + QueryPolicyDefinition []*checkIfPolicyNameExistsQueryPolicyDefinition `json:"queryPolicyDefinition"` +} + +// GetQueryPolicyDefinition returns checkIfPolicyNameExistsResponse.QueryPolicyDefinition, and is useful for accessing the field via an interface. +func (v *checkIfPolicyNameExistsResponse) GetQueryPolicyDefinition() []*checkIfPolicyNameExistsQueryPolicyDefinition { + return v.QueryPolicyDefinition +} + +// checkIfTagExistsQueryTag includes the requested fields of the GraphQL type Tag. +// The GraphQL type's documentation follows. +// +// Tag tells us about the tags that are linked to policies and other components. 
+type checkIfTagExistsQueryTag struct { + Id string `json:"id"` +} + +// GetId returns checkIfTagExistsQueryTag.Id, and is useful for accessing the field via an interface. +func (v *checkIfTagExistsQueryTag) GetId() string { return v.Id } + +// checkIfTagExistsResponse is returned by checkIfTagExists on success. +type checkIfTagExistsResponse struct { + QueryTag []*checkIfTagExistsQueryTag `json:"queryTag"` +} + +// GetQueryTag returns checkIfTagExistsResponse.QueryTag, and is useful for accessing the field via an interface. +func (v *checkIfTagExistsResponse) GetQueryTag() []*checkIfTagExistsQueryTag { return v.QueryTag } + +// getLastPolicyIdQueryOrganization includes the requested fields of the GraphQL type Organization. +type getLastPolicyIdQueryOrganization struct { + PoliciesAggregate *getLastPolicyIdQueryOrganizationPoliciesAggregatePolicyDefinitionAggregateResult `json:"policiesAggregate"` +} + +// GetPoliciesAggregate returns getLastPolicyIdQueryOrganization.PoliciesAggregate, and is useful for accessing the field via an interface. +func (v *getLastPolicyIdQueryOrganization) GetPoliciesAggregate() *getLastPolicyIdQueryOrganizationPoliciesAggregatePolicyDefinitionAggregateResult { + return v.PoliciesAggregate +} + +// getLastPolicyIdQueryOrganizationPoliciesAggregatePolicyDefinitionAggregateResult includes the requested fields of the GraphQL type PolicyDefinitionAggregateResult. +type getLastPolicyIdQueryOrganizationPoliciesAggregatePolicyDefinitionAggregateResult struct { + Count *int `json:"count"` +} + +// GetCount returns getLastPolicyIdQueryOrganizationPoliciesAggregatePolicyDefinitionAggregateResult.Count, and is useful for accessing the field via an interface. +func (v *getLastPolicyIdQueryOrganizationPoliciesAggregatePolicyDefinitionAggregateResult) GetCount() *int { + return v.Count +} + +// getLastPolicyIdResponse is returned by getLastPolicyId on success. 
+type getLastPolicyIdResponse struct { + QueryOrganization []*getLastPolicyIdQueryOrganization `json:"queryOrganization"` +} + +// GetQueryOrganization returns getLastPolicyIdResponse.QueryOrganization, and is useful for accessing the field via an interface. +func (v *getLastPolicyIdResponse) GetQueryOrganization() []*getLastPolicyIdQueryOrganization { + return v.QueryOrganization +} + +// getLastTagIdAggregateTagTagAggregateResult includes the requested fields of the GraphQL type TagAggregateResult. +type getLastTagIdAggregateTagTagAggregateResult struct { + Count *int `json:"count"` +} + +// GetCount returns getLastTagIdAggregateTagTagAggregateResult.Count, and is useful for accessing the field via an interface. +func (v *getLastTagIdAggregateTagTagAggregateResult) GetCount() *int { return v.Count } + +// getLastTagIdResponse is returned by getLastTagId on success. +type getLastTagIdResponse struct { + AggregateTag *getLastTagIdAggregateTagTagAggregateResult `json:"aggregateTag"` +} + +// GetAggregateTag returns getLastTagIdResponse.AggregateTag, and is useful for accessing the field via an interface. +func (v *getLastTagIdResponse) GetAggregateTag() *getLastTagIdAggregateTagTagAggregateResult { + return v.AggregateTag +} + +// updatePolicyDefinitionResponse is returned by updatePolicyDefinition on success. +type updatePolicyDefinitionResponse struct { + UpdatePolicyDefinition *updatePolicyDefinitionUpdatePolicyDefinitionUpdatePolicyDefinitionPayload `json:"updatePolicyDefinition"` +} + +// GetUpdatePolicyDefinition returns updatePolicyDefinitionResponse.UpdatePolicyDefinition, and is useful for accessing the field via an interface. 
+func (v *updatePolicyDefinitionResponse) GetUpdatePolicyDefinition() *updatePolicyDefinitionUpdatePolicyDefinitionUpdatePolicyDefinitionPayload { + return v.UpdatePolicyDefinition +} + +// updatePolicyDefinitionUpdatePolicyDefinitionUpdatePolicyDefinitionPayload includes the requested fields of the GraphQL type UpdatePolicyDefinitionPayload. +type updatePolicyDefinitionUpdatePolicyDefinitionUpdatePolicyDefinitionPayload struct { + NumUids *int `json:"numUids"` +} + +// GetNumUids returns updatePolicyDefinitionUpdatePolicyDefinitionUpdatePolicyDefinitionPayload.NumUids, and is useful for accessing the field via an interface. +func (v *updatePolicyDefinitionUpdatePolicyDefinitionUpdatePolicyDefinitionPayload) GetNumUids() *int { + return v.NumUids +} + +// updatePolicyEnforcementResponse is returned by updatePolicyEnforcement on success. +type updatePolicyEnforcementResponse struct { + UpdatePolicyEnforcement *updatePolicyEnforcementUpdatePolicyEnforcementUpdatePolicyEnforcementPayload `json:"updatePolicyEnforcement"` +} + +// GetUpdatePolicyEnforcement returns updatePolicyEnforcementResponse.UpdatePolicyEnforcement, and is useful for accessing the field via an interface. +func (v *updatePolicyEnforcementResponse) GetUpdatePolicyEnforcement() *updatePolicyEnforcementUpdatePolicyEnforcementUpdatePolicyEnforcementPayload { + return v.UpdatePolicyEnforcement +} + +// updatePolicyEnforcementUpdatePolicyEnforcementUpdatePolicyEnforcementPayload includes the requested fields of the GraphQL type UpdatePolicyEnforcementPayload. +type updatePolicyEnforcementUpdatePolicyEnforcementUpdatePolicyEnforcementPayload struct { + NumUids *int `json:"numUids"` +} + +// GetNumUids returns updatePolicyEnforcementUpdatePolicyEnforcementUpdatePolicyEnforcementPayload.NumUids, and is useful for accessing the field via an interface. 
+func (v *updatePolicyEnforcementUpdatePolicyEnforcementUpdatePolicyEnforcementPayload) GetNumUids() *int { + return v.NumUids +} + +// The query or mutation executed by addPolicyDefinition. +const addPolicyDefinition_Operation = ` +mutation addPolicyDefinition ($input: [AddPolicyDefinitionInput!]!) { + addPolicyDefinition(input: $input) { + numUids + } +} +` + +func addPolicyDefinition( + ctx_ context.Context, + client_ graphql.Client, + input []*AddPolicyDefinitionInput, +) (*addPolicyDefinitionResponse, error) { + req_ := &graphql.Request{ + OpName: "addPolicyDefinition", + Query: addPolicyDefinition_Operation, + Variables: &__addPolicyDefinitionInput{ + Input: input, + }, + } + var err_ error + + var data_ addPolicyDefinitionResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by addPolicyEnforcement. +const addPolicyEnforcement_Operation = ` +mutation addPolicyEnforcement ($input: [AddPolicyEnforcementInput!]!) { + addPolicyEnforcement(input: $input) { + numUids + } +} +` + +func addPolicyEnforcement( + ctx_ context.Context, + client_ graphql.Client, + input []*AddPolicyEnforcementInput, +) (*addPolicyEnforcementResponse, error) { + req_ := &graphql.Request{ + OpName: "addPolicyEnforcement", + Query: addPolicyEnforcement_Operation, + Variables: &__addPolicyEnforcementInput{ + Input: input, + }, + } + var err_ error + + var data_ addPolicyEnforcementResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by addTag. +const addTag_Operation = ` +mutation addTag ($tags: [AddTagInput!]!) 
{ + addTag(input: $tags) { + numUids + } +} +` + +func addTag( + ctx_ context.Context, + client_ graphql.Client, + tags []*AddTagInput, +) (*addTagResponse, error) { + req_ := &graphql.Request{ + OpName: "addTag", + Query: addTag_Operation, + Variables: &__addTagInput{ + Tags: tags, + }, + } + var err_ error + + var data_ addTagResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by assignTagsToPolicy. +const assignTagsToPolicy_Operation = ` +mutation assignTagsToPolicy ($policyEnforcementId: ID!, $tags: TagRef!, $updatedAt: DateTime!) { + updatePolicyEnforcement(input: {filter:{id:[$policyEnforcementId]},set:{tags:[$tags],updatedAt:$updatedAt}}) { + numUids + } +} +` + +func assignTagsToPolicy( + ctx_ context.Context, + client_ graphql.Client, + policyEnforcementId *string, + tags *TagRef, + updatedAt *time.Time, +) (*assignTagsToPolicyResponse, error) { + req_ := &graphql.Request{ + OpName: "assignTagsToPolicy", + Query: assignTagsToPolicy_Operation, + Variables: &__assignTagsToPolicyInput{ + PolicyEnforcementId: policyEnforcementId, + Tags: tags, + UpdatedAt: updatedAt, + }, + } + var err_ error + + var data_ assignTagsToPolicyResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by checkIfExactPolicyDefinitionExists. +const checkIfExactPolicyDefinitionExists_Operation = ` +query checkIfExactPolicyDefinitionExists ($policyName: String!, $category: String!, $stage: String!, $description: String!, $scheduledPolicy: Boolean!, $script: String!, $variables: String!, $conditionName: String!, $suggestion: String!, $orgId: String!) 
{ + queryPolicyDefinition(filter: {policyName:{eq:$policyName},category:{eq:$category},stage:{eq:$stage},description:{eq:$description},scheduledPolicy:$scheduledPolicy,script:{eq:$script},variables:{eq:$variables},conditionName:{eq:$conditionName},suggestion:{eq:$suggestion}}) @cascade { + id + ownerOrg(filter: {id:{eq:$orgId}}) { + id + } + } +} +` + +func checkIfExactPolicyDefinitionExists( + ctx_ context.Context, + client_ graphql.Client, + policyName string, + category string, + stage string, + description string, + scheduledPolicy *bool, + script string, + variables string, + conditionName string, + suggestion string, + orgId string, +) (*checkIfExactPolicyDefinitionExistsResponse, error) { + req_ := &graphql.Request{ + OpName: "checkIfExactPolicyDefinitionExists", + Query: checkIfExactPolicyDefinitionExists_Operation, + Variables: &__checkIfExactPolicyDefinitionExistsInput{ + PolicyName: policyName, + Category: category, + Stage: stage, + Description: description, + ScheduledPolicy: scheduledPolicy, + Script: script, + Variables: variables, + ConditionName: conditionName, + Suggestion: suggestion, + OrgId: orgId, + }, + } + var err_ error + + var data_ checkIfExactPolicyDefinitionExistsResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by checkIfPolicyEnforcementExists. +const checkIfPolicyEnforcementExists_Operation = ` +query checkIfPolicyEnforcementExists ($datasourceTool: String!, $policyId: String!) 
{ + queryPolicyEnforcement(filter: {datasourceTool:{eq:$datasourceTool}}) @cascade(fields: ["policy"]) { + id + conditionValue + policy(filter: {id:{eq:$policyId}}) { + id + } + tags { + id + } + } +} +` + +func checkIfPolicyEnforcementExists( + ctx_ context.Context, + client_ graphql.Client, + datasourceTool string, + policyId string, +) (*checkIfPolicyEnforcementExistsResponse, error) { + req_ := &graphql.Request{ + OpName: "checkIfPolicyEnforcementExists", + Query: checkIfPolicyEnforcementExists_Operation, + Variables: &__checkIfPolicyEnforcementExistsInput{ + DatasourceTool: datasourceTool, + PolicyId: policyId, + }, + } + var err_ error + + var data_ checkIfPolicyEnforcementExistsResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by checkIfPolicyNameExists. +const checkIfPolicyNameExists_Operation = ` +query checkIfPolicyNameExists ($policyName: String!, $orgId: String!) { + queryPolicyDefinition(filter: {policyName:{eq:$policyName}}) @cascade { + id + policyName + stage + category + ownerOrg(filter: {id:{eq:$orgId}}) { + id + } + } +} +` + +func checkIfPolicyNameExists( + ctx_ context.Context, + client_ graphql.Client, + policyName string, + orgId string, +) (*checkIfPolicyNameExistsResponse, error) { + req_ := &graphql.Request{ + OpName: "checkIfPolicyNameExists", + Query: checkIfPolicyNameExists_Operation, + Variables: &__checkIfPolicyNameExistsInput{ + PolicyName: policyName, + OrgId: orgId, + }, + } + var err_ error + + var data_ checkIfPolicyNameExistsResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by checkIfTagExists. +const checkIfTagExists_Operation = ` +query checkIfTagExists ($tagName: String!, $tagValue: String!, $createdBy: String!) 
{ + queryTag(filter: {tagName:{eq:$tagName},tagValue:{eq:$tagValue},createdBy:{eq:$createdBy}}) { + id + } +} +` + +func checkIfTagExists( + ctx_ context.Context, + client_ graphql.Client, + tagName string, + tagValue string, + createdBy string, +) (*checkIfTagExistsResponse, error) { + req_ := &graphql.Request{ + OpName: "checkIfTagExists", + Query: checkIfTagExists_Operation, + Variables: &__checkIfTagExistsInput{ + TagName: tagName, + TagValue: tagValue, + CreatedBy: createdBy, + }, + } + var err_ error + + var data_ checkIfTagExistsResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by getLastPolicyId. +const getLastPolicyId_Operation = ` +query getLastPolicyId ($organizationId: String!) { + queryOrganization(filter: {id:{eq:$organizationId}}) { + policiesAggregate { + count + } + } +} +` + +func getLastPolicyId( + ctx_ context.Context, + client_ graphql.Client, + organizationId string, +) (*getLastPolicyIdResponse, error) { + req_ := &graphql.Request{ + OpName: "getLastPolicyId", + Query: getLastPolicyId_Operation, + Variables: &__getLastPolicyIdInput{ + OrganizationId: organizationId, + }, + } + var err_ error + + var data_ getLastPolicyIdResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by getLastTagId. 
+const getLastTagId_Operation = ` +query getLastTagId { + aggregateTag { + count + } +} +` + +func getLastTagId( + ctx_ context.Context, + client_ graphql.Client, +) (*getLastTagIdResponse, error) { + req_ := &graphql.Request{ + OpName: "getLastTagId", + Query: getLastTagId_Operation, + } + var err_ error + + var data_ getLastTagIdResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by updatePolicyDefinition. +const updatePolicyDefinition_Operation = ` +mutation updatePolicyDefinition ($policyId: String!, $category: String!, $stage: String!, $description: String!, $scheduledPolicy: Boolean!, $script: String!, $variables: String!, $conditionName: String!, $suggestion: String!, $updatedAt: DateTime!) { + updatePolicyDefinition(input: {filter:{id:{eq:$policyId}},set:{category:$category,stage:$stage,description:$description,scheduledPolicy:$scheduledPolicy,script:$script,variables:$variables,conditionName:$conditionName,suggestion:$suggestion,updatedAt:$updatedAt}}) { + numUids + } +} +` + +func updatePolicyDefinition( + ctx_ context.Context, + client_ graphql.Client, + policyId string, + category string, + stage string, + description string, + scheduledPolicy *bool, + script string, + variables string, + conditionName string, + suggestion string, + updatedAt *time.Time, +) (*updatePolicyDefinitionResponse, error) { + req_ := &graphql.Request{ + OpName: "updatePolicyDefinition", + Query: updatePolicyDefinition_Operation, + Variables: &__updatePolicyDefinitionInput{ + PolicyId: policyId, + Category: category, + Stage: stage, + Description: description, + ScheduledPolicy: scheduledPolicy, + Script: script, + Variables: variables, + ConditionName: conditionName, + Suggestion: suggestion, + UpdatedAt: updatedAt, + }, + } + var err_ error + + var data_ updatePolicyDefinitionResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = 
client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + +// The query or mutation executed by updatePolicyEnforcement. +const updatePolicyEnforcement_Operation = ` +mutation updatePolicyEnforcement ($conditionValue: String!, $policyEnfId: ID!, $updatedAt: DateTime!) { + updatePolicyEnforcement(input: {set:{conditionValue:$conditionValue,updatedAt:$updatedAt},filter:{id:[$policyEnfId]}}) { + numUids + } +} +` + +func updatePolicyEnforcement( + ctx_ context.Context, + client_ graphql.Client, + conditionValue string, + policyEnfId *string, + updatedAt *time.Time, +) (*updatePolicyEnforcementResponse, error) { + req_ := &graphql.Request{ + OpName: "updatePolicyEnforcement", + Query: updatePolicyEnforcement_Operation, + Variables: &__updatePolicyEnforcementInput{ + ConditionValue: conditionValue, + PolicyEnfId: policyEnfId, + UpdatedAt: updatedAt, + }, + } + var err_ error + + var data_ updatePolicyEnforcementResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} diff --git a/policies/schema.graphql b/policies/schema.graphql new file mode 100644 index 0000000..183bbbd --- /dev/null +++ b/policies/schema.graphql @@ -0,0 +1,4664 @@ +directive @secret(field: String!, pred: String) on OBJECT | INTERFACE + +directive @custom(http: CustomHTTP, dql: String) on FIELD_DEFINITION + +directive @lambdaOnMutate(add: Boolean, update: Boolean, delete: Boolean) on OBJECT | INTERFACE + +directive @hasInverse(field: String!) on FIELD_DEFINITION + +directive @search(by: [DgraphIndex!]) on FIELD_DEFINITION + +directive @dgraph(type: String, pred: String) on OBJECT | INTERFACE | FIELD_DEFINITION + +directive @auth(password: AuthRule, query: AuthRule, add: AuthRule, update: AuthRule, delete: AuthRule) on OBJECT | INTERFACE + +directive @remoteResponse(name: String) on FIELD_DEFINITION + +directive @cacheControl(maxAge: Int!) 
on QUERY + +directive @generate(query: GenerateQueryParams, mutation: GenerateMutationParams, subscription: Boolean) on OBJECT | INTERFACE + +directive @id(interface: Boolean) on FIELD_DEFINITION + +directive @withSubscription on OBJECT | INTERFACE | FIELD_DEFINITION + +directive @remote on OBJECT | INTERFACE | UNION | INPUT_OBJECT | ENUM + +directive @cascade(fields: [String]) on FIELD + +directive @lambda on FIELD_DEFINITION + +input AddApplicationDeploymentInput { + """id is randomly assigned""" + id: String! + artifact: [ArtifactRef!] + applicationEnvironment: ApplicationEnvironmentRef! + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage! + + """source is argo, spinnaker etc""" + source: String! + + """component would be a service""" + component: String! + + """user who deployed the artifact""" + deployedBy: String + toolsUsed: ToolsUsedRef! + deploymentRisk: ApplicationDeploymentRiskRef + policyRunHistory: [RunHistoryRef!] +} + +type AddApplicationDeploymentPayload { + applicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + numUids: Int +} + +input AddApplicationDeploymentRiskInput { + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment: ApplicationDeploymentRef! +} + +type AddApplicationDeploymentRiskPayload { + applicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + numUids: Int +} + +input AddApplicationEnvironmentInput { + """id is randomly assigned""" + id: String! + environment: EnvironmentRef + application: ApplicationRef! + deploymentTarget: DeploymentTargetRef! + namespace: String! 
+ + """ + toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + """ + toolsUsed: [String!] + deployments: [ApplicationDeploymentRef!] + riskStatus: ApplicationRiskStatusRef + metadata: [KeyValueRef!] +} + +type AddApplicationEnvironmentPayload { + applicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + numUids: Int +} + +input AddApplicationInput { + """id is randomly assigned""" + id: String! + name: String! + roles: [RoleRef!] + environments: [ApplicationEnvironmentRef!] + team: TeamRef! + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + metadata: [KeyValueRef!] +} + +type AddApplicationPayload { + application(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + numUids: Int +} + +input AddApplicationRiskStatusInput { + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime! + updatedAt: DateTime! + applicationEnvironment: ApplicationEnvironmentRef! +} + +type AddApplicationRiskStatusPayload { + applicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + numUids: Int +} + +input AddArtifactInput { + id: String! + artifactType: String! + artifactName: String! + artifactTag: String! + artifactSha: String! + scanData: [ArtifactScanDataRef!] + artifactDeployment: [ApplicationDeploymentRef!] + buildDetails: BuildToolRef +} + +type AddArtifactPayload { + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + numUids: Int +} + +input AddArtifactScanDataInput { + id: String! + artifactSha: String! + tool: String! 
+ artifactDetails: ArtifactRef + lastScannedAt: DateTime + createdAt: DateTime + vulnTrackingId: String + components: [ComponentRef!] + vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int + sbomUrl: String + artifactLicenseScanUrl: String + artifactSecretScanUrl: String + sourceLicenseScanUrl: String + sourceSecretScanUrl: String + sourceScorecardScanUrl: String + sourceSemgrepHighSeverityScanUrl: String + sourceSemgrepMediumSeverityScanUrl: String + sourceSemgrepLowSeverityScanUrl: String + sourceSnykScanUrl: String + virusTotalUrlScan: String + riskStatus: RiskStatus + artifactRunHistory: [RunHistoryRef!] +} + +type AddArtifactScanDataPayload { + artifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + numUids: Int +} + +input AddBuildToolInput { + """id is randomly assigned""" + id: String! + + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String! + + """tool is jenkins etc""" + tool: String! + + """buildName is the name of the job/pipeline/action""" + buildName: String! + buildUrl: String! + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String! + + """artifactTag would be the tag of the artifact""" + artifactTag: String! + + """digest is the sha of the artifact""" + digest: String! + + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + artifactNode: ArtifactRef + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + sourceCodeTool: SourceCodeToolRef + commitMetaData: [CommitMetaDataRef!] + createdAt: DateTime! 
+} + +type AddBuildToolPayload { + buildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + numUids: Int +} + +input AddCommitMetaDataInput { + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool: BuildToolRef! +} + +type AddCommitMetaDataPayload { + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + numUids: Int +} + +input AddComponentInput { + id: String! + type: String! + name: String! + version: String! + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities: [VulnerabilityRef!] + artifacts: [ArtifactScanDataRef!] +} + +type AddComponentPayload { + component(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + numUids: Int +} + +input AddCredentialsInput { + data: String! + integrator: IntegratorRef! +} + +type AddCredentialsPayload { + credentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + numUids: Int +} + +input AddCWEInput { + id: String! + name: String! + description: String +} + +type AddCWEPayload { + cWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + numUids: Int +} + +input AddDeploymentTargetInput { + """id is randomly assigned""" + id: String! + name: String! + + """this would be the ip/server address of the target environment""" + ip: String! + account: String + + """this would be something like aws, gcp etc""" + targetType: String + + """this would be something like us-east-1 etc""" + region: String + kubescapeServiceConnected: String + isFirewall: Boolean + organization: OrganizationRef! + defaultEnvironment: EnvironmentRef! 
+} + +type AddDeploymentTargetPayload { + deploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + numUids: Int +} + +input AddEnvironmentInput { + id: String! + organization: OrganizationRef! + purpose: String! +} + +type AddEnvironmentPayload { + environment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + numUids: Int +} + +input AddFeatureModeInput { + id: String! + organization: OrganizationRef! + scan: String! + type: String! + enabled: Boolean! + category: String! + createdAt: DateTime! + updatedAt: DateTime! +} + +type AddFeatureModePayload { + featureMode(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode] + numUids: Int +} + +input AddIntegratorInput { + id: String! + organization: OrganizationRef! + name: String! + type: String! + category: String! + credentials: CredentialsRef! + createdAt: DateTime! + updatedAt: DateTime! +} + +type AddIntegratorPayload { + integrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + numUids: Int +} + +input AddKeyValueInput { + id: String! + name: String! + value: String! +} + +type AddKeyValuePayload { + keyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + numUids: Int +} + +input AddOrganizationInput { + """id is randomly assigned""" + id: String! + name: String! + roles: [RoleRef!] + teams: [TeamRef!] + environments: [DeploymentTargetRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + integrators: [IntegratorRef!] + featureModes: [FeatureModeRef!] +} + +type AddOrganizationPayload { + organization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + numUids: Int +} + +input AddPolicyDefinitionInput { + id: String! + ownerOrg: OrganizationRef! 
+ ownerTeam: TeamRef + ownerApplication: ApplicationRef + createdAt: DateTime! + updatedAt: DateTime! + policyName: String! + category: String! + stage: String! + description: String! + scheduledPolicy: Boolean! + script: String! + variables: String + conditionName: String + suggestion: String +} + +type AddPolicyDefinitionPayload { + policyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + numUids: Int +} + +input AddPolicyEnforcementInput { + policy: PolicyDefinitionRef! + enforcedOrg: OrganizationRef + enforcedTeam: TeamRef + enforcedApplication: ApplicationRef + status: Boolean! + forceApply: Boolean + severity: Severity! + datasourceTool: String! + action: String! + conditionValue: String + environments: [EnvironmentRef!] + tags: [TagRef!] + createdAt: DateTime! + updatedAt: DateTime! +} + +type AddPolicyEnforcementPayload { + policyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + numUids: Int +} + +input AddRoleInput { + """id is randomly assigned""" + id: String! + + """group should be a URI format that includes a scope or realm""" + group: String! + permission: RolePermission! +} + +type AddRolePayload { + role(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + numUids: Int +} + +input AddRunHistoryInput { + policyId: String! + applicationDeployment: ApplicationDeploymentRef + artifactScan: ArtifactScanDataRef + PolicyName: String! + Stage: String! + Artifact: String! + ArtifactTag: String! + ArtifactSha: String! + ArtifactNameTag: String! + DatasourceTool: String! + CreatedAt: DateTime! + UpdatedAt: DateTime! + DeployedAt: DateTime! + Hash: String + Pass: Boolean! + MetaData: String + FileApi: String + scheduledPolicy: Boolean! + policyEnforcements: PolicyEnforcementRef! 
+ securityIssue: SecurityIssueRef +} + +type AddRunHistoryPayload { + runHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + numUids: Int +} + +input AddSchemaVersionInput { + version: String! +} + +type AddSchemaVersionPayload { + schemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + numUids: Int +} + +input AddSecurityIssueInput { + AlertTitle: String + AlertMessage: String + Suggestions: String + Severity: Severity! + CreatedAt: DateTime! + UpdatedAt: DateTime! + Action: String! + JiraUrl: String + Status: String! + Reason: String + Error: String + Affects: [RunHistoryRef!] +} + +type AddSecurityIssuePayload { + securityIssue(filter: SecurityIssueFilter, order: SecurityIssueOrder, first: Int, offset: Int): [SecurityIssue] + numUids: Int +} + +input AddSourceCodeToolInput { + """id is randomly assigned""" + id: String! + createdAt: DateTime! + + """scm is the scm tool github/gitlab etc""" + scm: String! + + """repository is the git remote repository""" + repository: String! + + """branch is the git branch on which the artifact was built""" + branch: String! + + """headCommit is the checkout out head commit""" + headCommit: String + + """ + diffCommits is a comma separated string of the commits between the previous built artifact and the current + """ + diffCommits: String + licenseName: String + visibility: String + workflowName: String + + """parentRepo is populated in case the git repo is a fork""" + parentRepo: String + buildTool: BuildToolRef! +} + +type AddSourceCodeToolPayload { + sourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + numUids: Int +} + +input AddTagInput { + id: String! + tagName: String! + tagValue: String! + tagDescription: String + createdBy: String + createdAt: DateTime! + updatedAt: DateTime! + policies: [PolicyEnforcementRef!] 
+} + +type AddTagPayload { + tag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + numUids: Int +} + +input AddTeamInput { + """id is randomly assigned""" + id: String! + name: String! + roles: [RoleRef!] + organization: OrganizationRef! + applications: [ApplicationRef!] + labels: [KeyValueRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] +} + +type AddTeamPayload { + team(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + numUids: Int +} + +input AddToolsUsedInput { + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +type AddToolsUsedPayload { + toolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + numUids: Int +} + +input AddVulnerabilityInput { + id: String! + parent: String! + ratings: Severity + cwes: [CWERef!] + summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: Float + priority: String + epss: Float + cisa_kev: String + affects: [ComponentRef!] +} + +type AddVulnerabilityPayload { + vulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + numUids: Int +} + +type Application implements RBAC { + """id is randomly assigned""" + id: String! + name: String! + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + environments(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment!] + team(filter: TeamFilter): Team! + policies(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition!] + policyEnforcements(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] + metadata(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue!] 
+ rolesAggregate(filter: RoleFilter): RoleAggregateResult + environmentsAggregate(filter: ApplicationEnvironmentFilter): ApplicationEnvironmentAggregateResult + policiesAggregate(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + policyEnforcementsAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult + metadataAggregate(filter: KeyValueFilter): KeyValueAggregateResult +} + +type ApplicationAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String +} + +""" +ApplicationDeployment tells us about the the artifact deployed along with its associated details. +""" +type ApplicationDeployment { + """id is randomly assigned""" + id: String! + + """artifact that is deployed""" + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact!] + applicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironment! + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage! + + """source is argo, spinnaker etc""" + source: String! + + """component would be a service""" + component: String! + + """user who deployed the artifact""" + deployedBy: String + + """ + toolsUsed contains tools of different stages of source, build, artifact and deploy along with some different tools + """ + toolsUsed(filter: ToolsUsedFilter): ToolsUsed! + + """deploymentRisk is the risk status of the deployment""" + deploymentRisk(filter: ApplicationDeploymentRiskFilter): ApplicationDeploymentRisk + + """policyRunHistory is the policy execution history for this deployment""" + policyRunHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory!] 
+ artifactAggregate(filter: ArtifactFilter): ArtifactAggregateResult + policyRunHistoryAggregate(filter: RunHistoryFilter): RunHistoryAggregateResult +} + +type ApplicationDeploymentAggregateResult { + count: Int + idMin: String + idMax: String + deployedAtMin: DateTime + deployedAtMax: DateTime + sourceMin: String + sourceMax: String + componentMin: String + componentMax: String + deployedByMin: String + deployedByMax: String +} + +input ApplicationDeploymentFilter { + id: StringHashFilter + deployedAt: DateTimeFilter + deploymentStage: DeploymentStage_exact + component: StringExactFilter_StringRegExpFilter + has: [ApplicationDeploymentHasFilter] + and: [ApplicationDeploymentFilter] + or: [ApplicationDeploymentFilter] + not: ApplicationDeploymentFilter +} + +enum ApplicationDeploymentHasFilter { + id + artifact + applicationEnvironment + deployedAt + deploymentStage + source + component + deployedBy + toolsUsed + deploymentRisk + policyRunHistory +} + +input ApplicationDeploymentOrder { + asc: ApplicationDeploymentOrderable + desc: ApplicationDeploymentOrderable + then: ApplicationDeploymentOrder +} + +enum ApplicationDeploymentOrderable { + id + deployedAt + source + component + deployedBy +} + +input ApplicationDeploymentPatch { + artifact: [ArtifactRef!] + applicationEnvironment: ApplicationEnvironmentRef + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage + + """source is argo, spinnaker etc""" + source: String + + """component would be a service""" + component: String + + """user who deployed the artifact""" + deployedBy: String + toolsUsed: ToolsUsedRef + deploymentRisk: ApplicationDeploymentRiskRef + policyRunHistory: [RunHistoryRef!] +} + +input ApplicationDeploymentRef { + """id is randomly assigned""" + id: String + artifact: [ArtifactRef!] 
+ applicationEnvironment: ApplicationEnvironmentRef + deployedAt: DateTime + + """ + deploymentStage is an enum and can be discovered, current, previous or blocked + """ + deploymentStage: DeploymentStage + + """source is argo, spinnaker etc""" + source: String + + """component would be a service""" + component: String + + """user who deployed the artifact""" + deployedBy: String + toolsUsed: ToolsUsedRef + deploymentRisk: ApplicationDeploymentRiskRef + policyRunHistory: [RunHistoryRef!] +} + +""" +ApplicationDeploymentRisk tells us about the risk status and alerts for different stages for an application deployment. +""" +type ApplicationDeploymentRisk { + id: ID! + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment(filter: ApplicationDeploymentFilter): ApplicationDeployment! +} + +type ApplicationDeploymentRiskAggregateResult { + count: Int + sourceCodeAlertsScoreMin: Int + sourceCodeAlertsScoreMax: Int + sourceCodeAlertsScoreSum: Int + sourceCodeAlertsScoreAvg: Float + buildAlertsScoreMin: Int + buildAlertsScoreMax: Int + buildAlertsScoreSum: Int + buildAlertsScoreAvg: Float + artifactAlertsScoreMin: Int + artifactAlertsScoreMax: Int + artifactAlertsScoreSum: Int + artifactAlertsScoreAvg: Float + deploymentAlertsScoreMin: Int + deploymentAlertsScoreMax: Int + deploymentAlertsScoreSum: Int + deploymentAlertsScoreAvg: Float +} + +input ApplicationDeploymentRiskFilter { + id: [ID!] 
+ deploymentRiskStatus: RiskStatus_exact_StringRegExpFilter + has: [ApplicationDeploymentRiskHasFilter] + and: [ApplicationDeploymentRiskFilter] + or: [ApplicationDeploymentRiskFilter] + not: ApplicationDeploymentRiskFilter +} + +enum ApplicationDeploymentRiskHasFilter { + sourceCodeAlertsScore + buildAlertsScore + artifactAlertsScore + deploymentAlertsScore + deploymentRiskStatus + applicationDeployment +} + +input ApplicationDeploymentRiskOrder { + asc: ApplicationDeploymentRiskOrderable + desc: ApplicationDeploymentRiskOrderable + then: ApplicationDeploymentRiskOrder +} + +enum ApplicationDeploymentRiskOrderable { + sourceCodeAlertsScore + buildAlertsScore + artifactAlertsScore + deploymentAlertsScore +} + +input ApplicationDeploymentRiskPatch { + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment: ApplicationDeploymentRef +} + +input ApplicationDeploymentRiskRef { + id: ID + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment: ApplicationDeploymentRef +} + +""" +ApplicationEnvironment is a running instance of an application down to the level of a namespace or its non k8s equivalent. +""" +type ApplicationEnvironment { + """id is randomly assigned""" + id: String! + + """environment denotes whether it is dev, prod, staging, non-prod etc""" + environment(filter: EnvironmentFilter): Environment + application(filter: ApplicationFilter): Application! + deploymentTarget(filter: DeploymentTargetFilter): DeploymentTarget! + namespace: String! + + """ + toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + """ + toolsUsed: [String!] + deployments(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment!] 
+ riskStatus(filter: ApplicationRiskStatusFilter): ApplicationRiskStatus + metadata(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue!] + deploymentsAggregate(filter: ApplicationDeploymentFilter): ApplicationDeploymentAggregateResult + metadataAggregate(filter: KeyValueFilter): KeyValueAggregateResult +} + +type ApplicationEnvironmentAggregateResult { + count: Int + idMin: String + idMax: String + namespaceMin: String + namespaceMax: String +} + +input ApplicationEnvironmentFilter { + id: StringHashFilter + namespace: StringExactFilter_StringRegExpFilter + has: [ApplicationEnvironmentHasFilter] + and: [ApplicationEnvironmentFilter] + or: [ApplicationEnvironmentFilter] + not: ApplicationEnvironmentFilter +} + +enum ApplicationEnvironmentHasFilter { + id + environment + application + deploymentTarget + namespace + toolsUsed + deployments + riskStatus + metadata +} + +input ApplicationEnvironmentOrder { + asc: ApplicationEnvironmentOrderable + desc: ApplicationEnvironmentOrderable + then: ApplicationEnvironmentOrder +} + +enum ApplicationEnvironmentOrderable { + id + namespace +} + +input ApplicationEnvironmentPatch { + environment: EnvironmentRef + application: ApplicationRef + deploymentTarget: DeploymentTargetRef + namespace: String + + """ + toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + """ + toolsUsed: [String!] + deployments: [ApplicationDeploymentRef!] + riskStatus: ApplicationRiskStatusRef + metadata: [KeyValueRef!] +} + +input ApplicationEnvironmentRef { + """id is randomly assigned""" + id: String + environment: EnvironmentRef + application: ApplicationRef + deploymentTarget: DeploymentTargetRef + namespace: String + + """ + toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env + """ + toolsUsed: [String!] + deployments: [ApplicationDeploymentRef!] 
+ riskStatus: ApplicationRiskStatusRef + metadata: [KeyValueRef!] +} + +input ApplicationFilter { + id: StringHashFilter + name: StringExactFilter_StringRegExpFilter + has: [ApplicationHasFilter] + and: [ApplicationFilter] + or: [ApplicationFilter] + not: ApplicationFilter +} + +enum ApplicationHasFilter { + id + name + roles + environments + team + policies + policyEnforcements + metadata +} + +input ApplicationOrder { + asc: ApplicationOrderable + desc: ApplicationOrderable + then: ApplicationOrder +} + +enum ApplicationOrderable { + id + name +} + +input ApplicationPatch { + name: String + roles: [RoleRef!] + environments: [ApplicationEnvironmentRef!] + team: TeamRef + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + metadata: [KeyValueRef!] +} + +input ApplicationRef { + """id is randomly assigned""" + id: String + name: String + roles: [RoleRef!] + environments: [ApplicationEnvironmentRef!] + team: TeamRef + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + metadata: [KeyValueRef!] +} + +""" +ApplicationRiskStatus tells us about the risk status and alerts for different stages for an application environment. +""" +type ApplicationRiskStatus { + id: ID! + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime! + updatedAt: DateTime! + applicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironment! 
+} + +type ApplicationRiskStatusAggregateResult { + count: Int + sourceCodeAlertsMin: Int + sourceCodeAlertsMax: Int + sourceCodeAlertsSum: Int + sourceCodeAlertsAvg: Float + buildAlertsMin: Int + buildAlertsMax: Int + buildAlertsSum: Int + buildAlertsAvg: Float + artifactAlertsMin: Int + artifactAlertsMax: Int + artifactAlertsSum: Int + artifactAlertsAvg: Float + deploymentAlertsMin: Int + deploymentAlertsMax: Int + deploymentAlertsSum: Int + deploymentAlertsAvg: Float + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input ApplicationRiskStatusFilter { + id: [ID!] + riskStatus: RiskStatus_exact_StringRegExpFilter + has: [ApplicationRiskStatusHasFilter] + and: [ApplicationRiskStatusFilter] + or: [ApplicationRiskStatusFilter] + not: ApplicationRiskStatusFilter +} + +enum ApplicationRiskStatusHasFilter { + riskStatus + sourceCodeAlerts + buildAlerts + artifactAlerts + deploymentAlerts + createdAt + updatedAt + applicationEnvironment +} + +input ApplicationRiskStatusOrder { + asc: ApplicationRiskStatusOrderable + desc: ApplicationRiskStatusOrderable + then: ApplicationRiskStatusOrder +} + +enum ApplicationRiskStatusOrderable { + sourceCodeAlerts + buildAlerts + artifactAlerts + deploymentAlerts + createdAt + updatedAt +} + +input ApplicationRiskStatusPatch { + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime + updatedAt: DateTime + applicationEnvironment: ApplicationEnvironmentRef +} + +input ApplicationRiskStatusRef { + id: ID + riskStatus: RiskStatus + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime + updatedAt: DateTime + applicationEnvironment: ApplicationEnvironmentRef +} + +type Artifact { + id: String! + artifactType: String! + artifactName: String! + artifactTag: String! + artifactSha: String! 
+ scanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData!] + artifactDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment!] + buildDetails(filter: BuildToolFilter): BuildTool + scanDataAggregate(filter: ArtifactScanDataFilter): ArtifactScanDataAggregateResult + artifactDeploymentAggregate(filter: ApplicationDeploymentFilter): ApplicationDeploymentAggregateResult +} + +type ArtifactAggregateResult { + count: Int + idMin: String + idMax: String + artifactTypeMin: String + artifactTypeMax: String + artifactNameMin: String + artifactNameMax: String + artifactTagMin: String + artifactTagMax: String + artifactShaMin: String + artifactShaMax: String +} + +input ArtifactFilter { + id: StringHashFilter + artifactType: StringExactFilter + artifactName: StringExactFilter_StringRegExpFilter + artifactTag: StringExactFilter_StringRegExpFilter + artifactSha: StringExactFilter + has: [ArtifactHasFilter] + and: [ArtifactFilter] + or: [ArtifactFilter] + not: ArtifactFilter +} + +enum ArtifactHasFilter { + id + artifactType + artifactName + artifactTag + artifactSha + scanData + artifactDeployment + buildDetails +} + +input ArtifactOrder { + asc: ArtifactOrderable + desc: ArtifactOrderable + then: ArtifactOrder +} + +enum ArtifactOrderable { + id + artifactType + artifactName + artifactTag + artifactSha +} + +input ArtifactPatch { + artifactType: String + artifactName: String + artifactTag: String + artifactSha: String + scanData: [ArtifactScanDataRef!] + artifactDeployment: [ApplicationDeploymentRef!] + buildDetails: BuildToolRef +} + +input ArtifactRef { + id: String + artifactType: String + artifactName: String + artifactTag: String + artifactSha: String + scanData: [ArtifactScanDataRef!] + artifactDeployment: [ApplicationDeploymentRef!] + buildDetails: BuildToolRef +} + +type ArtifactScanData { + id: String! + artifactSha: String! 
+ tool: String! + artifactDetails(filter: ArtifactFilter): Artifact + lastScannedAt: DateTime + createdAt: DateTime + vulnTrackingId: String + components(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component!] + vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int + sbomUrl: String + artifactLicenseScanUrl: String + artifactSecretScanUrl: String + sourceLicenseScanUrl: String + sourceSecretScanUrl: String + sourceScorecardScanUrl: String + sourceSemgrepHighSeverityScanUrl: String + sourceSemgrepMediumSeverityScanUrl: String + sourceSemgrepLowSeverityScanUrl: String + sourceSnykScanUrl: String + virusTotalUrlScan: String + riskStatus: RiskStatus + artifactRunHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory!] + componentsAggregate(filter: ComponentFilter): ComponentAggregateResult + artifactRunHistoryAggregate(filter: RunHistoryFilter): RunHistoryAggregateResult +} + +type ArtifactScanDataAggregateResult { + count: Int + idMin: String + idMax: String + artifactShaMin: String + artifactShaMax: String + toolMin: String + toolMax: String + lastScannedAtMin: DateTime + lastScannedAtMax: DateTime + createdAtMin: DateTime + createdAtMax: DateTime + vulnTrackingIdMin: String + vulnTrackingIdMax: String + vulnCriticalCountMin: Int + vulnCriticalCountMax: Int + vulnCriticalCountSum: Int + vulnCriticalCountAvg: Float + vulnHighCountMin: Int + vulnHighCountMax: Int + vulnHighCountSum: Int + vulnHighCountAvg: Float + vulnMediumCountMin: Int + vulnMediumCountMax: Int + vulnMediumCountSum: Int + vulnMediumCountAvg: Float + vulnLowCountMin: Int + vulnLowCountMax: Int + vulnLowCountSum: Int + vulnLowCountAvg: Float + vulnInfoCountMin: Int + vulnInfoCountMax: Int + vulnInfoCountSum: Int + vulnInfoCountAvg: Float + vulnUnknownCountMin: Int + vulnUnknownCountMax: Int + vulnUnknownCountSum: 
Int + vulnUnknownCountAvg: Float + vulnNoneCountMin: Int + vulnNoneCountMax: Int + vulnNoneCountSum: Int + vulnNoneCountAvg: Float + vulnTotalCountMin: Int + vulnTotalCountMax: Int + vulnTotalCountSum: Int + vulnTotalCountAvg: Float + sbomUrlMin: String + sbomUrlMax: String + artifactLicenseScanUrlMin: String + artifactLicenseScanUrlMax: String + artifactSecretScanUrlMin: String + artifactSecretScanUrlMax: String + sourceLicenseScanUrlMin: String + sourceLicenseScanUrlMax: String + sourceSecretScanUrlMin: String + sourceSecretScanUrlMax: String + sourceScorecardScanUrlMin: String + sourceScorecardScanUrlMax: String + sourceSemgrepHighSeverityScanUrlMin: String + sourceSemgrepHighSeverityScanUrlMax: String + sourceSemgrepMediumSeverityScanUrlMin: String + sourceSemgrepMediumSeverityScanUrlMax: String + sourceSemgrepLowSeverityScanUrlMin: String + sourceSemgrepLowSeverityScanUrlMax: String + sourceSnykScanUrlMin: String + sourceSnykScanUrlMax: String + virusTotalUrlScanMin: String + virusTotalUrlScanMax: String +} + +input ArtifactScanDataFilter { + id: StringHashFilter + artifactSha: StringExactFilter + tool: StringExactFilter + vulnCriticalCount: IntFilter + vulnHighCount: IntFilter + vulnMediumCount: IntFilter + vulnLowCount: IntFilter + vulnInfoCount: IntFilter + vulnUnknownCount: IntFilter + vulnNoneCount: IntFilter + vulnTotalCount: IntFilter + riskStatus: RiskStatus_exact_StringRegExpFilter + has: [ArtifactScanDataHasFilter] + and: [ArtifactScanDataFilter] + or: [ArtifactScanDataFilter] + not: ArtifactScanDataFilter +} + +enum ArtifactScanDataHasFilter { + id + artifactSha + tool + artifactDetails + lastScannedAt + createdAt + vulnTrackingId + components + vulnCriticalCount + vulnHighCount + vulnMediumCount + vulnLowCount + vulnInfoCount + vulnUnknownCount + vulnNoneCount + vulnTotalCount + sbomUrl + artifactLicenseScanUrl + artifactSecretScanUrl + sourceLicenseScanUrl + sourceSecretScanUrl + sourceScorecardScanUrl + sourceSemgrepHighSeverityScanUrl + 
sourceSemgrepMediumSeverityScanUrl + sourceSemgrepLowSeverityScanUrl + sourceSnykScanUrl + virusTotalUrlScan + riskStatus + artifactRunHistory +} + +input ArtifactScanDataOrder { + asc: ArtifactScanDataOrderable + desc: ArtifactScanDataOrderable + then: ArtifactScanDataOrder +} + +enum ArtifactScanDataOrderable { + id + artifactSha + tool + lastScannedAt + createdAt + vulnTrackingId + vulnCriticalCount + vulnHighCount + vulnMediumCount + vulnLowCount + vulnInfoCount + vulnUnknownCount + vulnNoneCount + vulnTotalCount + sbomUrl + artifactLicenseScanUrl + artifactSecretScanUrl + sourceLicenseScanUrl + sourceSecretScanUrl + sourceScorecardScanUrl + sourceSemgrepHighSeverityScanUrl + sourceSemgrepMediumSeverityScanUrl + sourceSemgrepLowSeverityScanUrl + sourceSnykScanUrl + virusTotalUrlScan +} + +input ArtifactScanDataPatch { + artifactSha: String + tool: String + artifactDetails: ArtifactRef + lastScannedAt: DateTime + createdAt: DateTime + vulnTrackingId: String + components: [ComponentRef!] + vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int + sbomUrl: String + artifactLicenseScanUrl: String + artifactSecretScanUrl: String + sourceLicenseScanUrl: String + sourceSecretScanUrl: String + sourceScorecardScanUrl: String + sourceSemgrepHighSeverityScanUrl: String + sourceSemgrepMediumSeverityScanUrl: String + sourceSemgrepLowSeverityScanUrl: String + sourceSnykScanUrl: String + virusTotalUrlScan: String + riskStatus: RiskStatus + artifactRunHistory: [RunHistoryRef!] +} + +input ArtifactScanDataRef { + id: String + artifactSha: String + tool: String + artifactDetails: ArtifactRef + lastScannedAt: DateTime + createdAt: DateTime + vulnTrackingId: String + components: [ComponentRef!] 
+ vulnCriticalCount: Int + vulnHighCount: Int + vulnMediumCount: Int + vulnLowCount: Int + vulnInfoCount: Int + vulnUnknownCount: Int + vulnNoneCount: Int + vulnTotalCount: Int + sbomUrl: String + artifactLicenseScanUrl: String + artifactSecretScanUrl: String + sourceLicenseScanUrl: String + sourceSecretScanUrl: String + sourceScorecardScanUrl: String + sourceSemgrepHighSeverityScanUrl: String + sourceSemgrepMediumSeverityScanUrl: String + sourceSemgrepLowSeverityScanUrl: String + sourceSnykScanUrl: String + virusTotalUrlScan: String + riskStatus: RiskStatus + artifactRunHistory: [RunHistoryRef!] +} + +input AuthRule { + and: [AuthRule] + or: [AuthRule] + not: AuthRule + rule: String +} + +"""BuildTool contains data from build tool events.""" +type BuildTool { + """id is randomly assigned""" + id: String! + + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String! + + """tool is jenkins etc""" + tool: String! + + """buildName is the name of the job/pipeline/action""" + buildName: String! + buildUrl: String! + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String! + + """artifactTag would be the tag of the artifact""" + artifactTag: String! + + """digest is the sha of the artifact""" + digest: String! 
+ + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + + """artifactNode links a BuildTool node to an artifact""" + artifactNode(filter: ArtifactFilter): Artifact + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + + """sourceCodeTool links a BuildTool node to the source details""" + sourceCodeTool(filter: SourceCodeToolFilter): SourceCodeTool + + """commitMetaData links a BuildTool node to the git commit based details""" + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData!] + createdAt: DateTime! + commitMetaDataAggregate(filter: CommitMetaDataFilter): CommitMetaDataAggregateResult +} + +type BuildToolAggregateResult { + count: Int + idMin: String + idMax: String + buildIdMin: String + buildIdMax: String + toolMin: String + toolMax: String + buildNameMin: String + buildNameMax: String + buildUrlMin: String + buildUrlMax: String + artifactTypeMin: String + artifactTypeMax: String + artifactMin: String + artifactMax: String + artifactTagMin: String + artifactTagMax: String + digestMin: String + digestMax: String + buildDigestMin: String + buildDigestMax: String + buildTimeMin: DateTime + buildTimeMax: DateTime + buildUserMin: String + buildUserMax: String + createdAtMin: DateTime + createdAtMax: DateTime +} + +input BuildToolFilter { + id: StringHashFilter + buildId: StringExactFilter_StringRegExpFilter + tool: StringExactFilter + buildName: StringExactFilter_StringRegExpFilter + buildUrl: StringExactFilter + artifactType: StringExactFilter + artifact: StringExactFilter + artifactTag: StringExactFilter + digest: StringExactFilter + buildDigest: StringExactFilter + has: [BuildToolHasFilter] + and: [BuildToolFilter] + or: [BuildToolFilter] + not: BuildToolFilter +} + +enum BuildToolHasFilter { + id + buildId + tool + buildName + buildUrl + 
artifactType + artifact + artifactTag + digest + buildDigest + artifactNode + buildTime + buildUser + sourceCodeTool + commitMetaData + createdAt +} + +input BuildToolOrder { + asc: BuildToolOrderable + desc: BuildToolOrderable + then: BuildToolOrder +} + +enum BuildToolOrderable { + id + buildId + tool + buildName + buildUrl + artifactType + artifact + artifactTag + digest + buildDigest + buildTime + buildUser + createdAt +} + +input BuildToolPatch { + """buildId is a unique job id, run id for a job/pipeline/action""" + buildId: String + + """tool is jenkins etc""" + tool: String + + """buildName is the name of the job/pipeline/action""" + buildName: String + buildUrl: String + artifactType: String + + """artifact would be something like nginx without the tag""" + artifact: String + + """artifactTag would be the tag of the artifact""" + artifactTag: String + + """digest is the sha of the artifact""" + digest: String + + """buildDigest is the sha of the artifact as sent from the build tool""" + buildDigest: String + artifactNode: ArtifactRef + + """buildTime is the time at which the artifact was built""" + buildTime: DateTime + + """buildUser is the user that built the artifact""" + buildUser: String + sourceCodeTool: SourceCodeToolRef + commitMetaData: [CommitMetaDataRef!] 
+ createdAt: DateTime
+}
+
+input BuildToolRef {
+ """id is randomly assigned"""
+ id: String
+
+ """buildId is a unique job id, run id for a job/pipeline/action"""
+ buildId: String
+
+ """tool is jenkins etc"""
+ tool: String
+
+ """buildName is the name of the job/pipeline/action"""
+ buildName: String
+ buildUrl: String
+ artifactType: String
+
+ """artifact would be something like nginx without the tag"""
+ artifact: String
+
+ """artifactTag would be the tag of the artifact"""
+ artifactTag: String
+
+ """digest is the sha of the artifact"""
+ digest: String
+
+ """buildDigest is the sha of the artifact as sent from the build tool"""
+ buildDigest: String
+ artifactNode: ArtifactRef
+
+ """buildTime is the time at which the artifact was built"""
+ buildTime: DateTime
+
+ """buildUser is the user that built the artifact"""
+ buildUser: String
+ sourceCodeTool: SourceCodeToolRef
+ commitMetaData: [CommitMetaDataRef!]
+ createdAt: DateTime
+}
+
+"""
+CommitMetaData contains the git commit related details of the source repository.
+"""
+type CommitMetaData {
+ """id is randomly assigned"""
+ id: ID!
+
+ """commit is a git commit that was used to build an artifact"""
+ commit: String
+ repository: String
+
+ """commitSign tells us whether the commit is signed"""
+ commitSign: Boolean
+ noOfReviewersConf: Int
+ reviewerList: [String!]
+ approverList: [String!]
+ buildTool(filter: BuildToolFilter): BuildTool!
+}
+
+type CommitMetaDataAggregateResult {
+ count: Int
+ commitMin: String
+ commitMax: String
+ repositoryMin: String
+ repositoryMax: String
+ noOfReviewersConfMin: Int
+ noOfReviewersConfMax: Int
+ noOfReviewersConfSum: Int
+ noOfReviewersConfAvg: Float
+}
+
+input CommitMetaDataFilter {
+ id: [ID!]
+ has: [CommitMetaDataHasFilter] + and: [CommitMetaDataFilter] + or: [CommitMetaDataFilter] + not: CommitMetaDataFilter +} + +enum CommitMetaDataHasFilter { + commit + repository + commitSign + noOfReviewersConf + reviewerList + approverList + buildTool +} + +input CommitMetaDataOrder { + asc: CommitMetaDataOrderable + desc: CommitMetaDataOrderable + then: CommitMetaDataOrder +} + +enum CommitMetaDataOrderable { + commit + repository + noOfReviewersConf +} + +input CommitMetaDataPatch { + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool: BuildToolRef +} + +input CommitMetaDataRef { + """id is randomly assigned""" + id: ID + + """commit is a git commit that was used to build an artifact""" + commit: String + repository: String + + """commitSign tells us whether the commit is signed""" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool: BuildToolRef +} + +type Component { + id: String! + type: String! + name: String! + version: String! + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability!] + artifacts(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData!] 
+ vulnerabilitiesAggregate(filter: VulnerabilityFilter): VulnerabilityAggregateResult + artifactsAggregate(filter: ArtifactScanDataFilter): ArtifactScanDataAggregateResult +} + +type ComponentAggregateResult { + count: Int + idMin: String + idMax: String + typeMin: String + typeMax: String + nameMin: String + nameMax: String + versionMin: String + versionMax: String + purlMin: String + purlMax: String + cpeMin: String + cpeMax: String + scannedAtMin: DateTime + scannedAtMax: DateTime +} + +input ComponentFilter { + id: StringHashFilter + name: StringExactFilter_StringRegExpFilter + version: StringExactFilter_StringRegExpFilter + purl: StringExactFilter + cpe: StringExactFilter + has: [ComponentHasFilter] + and: [ComponentFilter] + or: [ComponentFilter] + not: ComponentFilter +} + +enum ComponentHasFilter { + id + type + name + version + licenses + purl + cpe + scannedAt + vulnerabilities + artifacts +} + +input ComponentOrder { + asc: ComponentOrderable + desc: ComponentOrderable + then: ComponentOrder +} + +enum ComponentOrderable { + id + type + name + version + purl + cpe + scannedAt +} + +input ComponentPatch { + type: String + name: String + version: String + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities: [VulnerabilityRef!] + artifacts: [ArtifactScanDataRef!] +} + +input ComponentRef { + id: String + type: String + name: String + version: String + licenses: [String!] + purl: String + cpe: String + scannedAt: DateTime + vulnerabilities: [VulnerabilityRef!] + artifacts: [ArtifactScanDataRef!] +} + +input ContainsFilter { + point: PointRef + polygon: PolygonRef +} + +type Credentials { + id: ID! + data: String! + integrator(filter: IntegratorFilter): Integrator! +} + +type CredentialsAggregateResult { + count: Int + dataMin: String + dataMax: String +} + +input CredentialsFilter { + id: [ID!] 
+ has: [CredentialsHasFilter] + and: [CredentialsFilter] + or: [CredentialsFilter] + not: CredentialsFilter +} + +enum CredentialsHasFilter { + data + integrator +} + +input CredentialsOrder { + asc: CredentialsOrderable + desc: CredentialsOrderable + then: CredentialsOrder +} + +enum CredentialsOrderable { + data +} + +input CredentialsPatch { + data: String + integrator: IntegratorRef +} + +input CredentialsRef { + id: ID + data: String + integrator: IntegratorRef +} + +input CustomHTTP { + url: String! + method: HTTPMethod! + body: String + graphql: String + mode: Mode + forwardHeaders: [String!] + secretHeaders: [String!] + introspectionHeaders: [String!] + skipIntrospection: Boolean +} + +type CWE { + id: String! + name: String! + description: String +} + +type CWEAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + descriptionMin: String + descriptionMax: String +} + +input CWEFilter { + id: StringHashFilter + has: [CWEHasFilter] + and: [CWEFilter] + or: [CWEFilter] + not: CWEFilter +} + +enum CWEHasFilter { + id + name + description +} + +input CWEOrder { + asc: CWEOrderable + desc: CWEOrderable + then: CWEOrder +} + +enum CWEOrderable { + id + name + description +} + +input CWEPatch { + name: String + description: String +} + +input CWERef { + id: String + name: String + description: String +} + +""" +The DateTime scalar type represents date and time as a string in RFC3339 format. +For example: "1985-04-12T23:20:50.52Z" represents 20 mins 50.52 secs after the 23rd hour of Apr 12th 1985 in UTC. +""" +scalar DateTime + +input DateTimeFilter { + eq: DateTime + in: [DateTime] + le: DateTime + lt: DateTime + ge: DateTime + gt: DateTime + between: DateTimeRange +} + +input DateTimeRange { + min: DateTime! + max: DateTime! 
+} + +type DeleteApplicationDeploymentPayload { + applicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + msg: String + numUids: Int +} + +type DeleteApplicationDeploymentRiskPayload { + applicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + msg: String + numUids: Int +} + +type DeleteApplicationEnvironmentPayload { + applicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + msg: String + numUids: Int +} + +type DeleteApplicationPayload { + application(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + msg: String + numUids: Int +} + +type DeleteApplicationRiskStatusPayload { + applicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + msg: String + numUids: Int +} + +type DeleteArtifactPayload { + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + msg: String + numUids: Int +} + +type DeleteArtifactScanDataPayload { + artifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + msg: String + numUids: Int +} + +type DeleteBuildToolPayload { + buildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + msg: String + numUids: Int +} + +type DeleteCommitMetaDataPayload { + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + msg: String + numUids: Int +} + +type DeleteComponentPayload { + component(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + msg: String + numUids: Int +} + +type DeleteCredentialsPayload { + 
credentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + msg: String + numUids: Int +} + +type DeleteCWEPayload { + cWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + msg: String + numUids: Int +} + +type DeleteDeploymentTargetPayload { + deploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + msg: String + numUids: Int +} + +type DeleteEnvironmentPayload { + environment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + msg: String + numUids: Int +} + +type DeleteFeatureModePayload { + featureMode(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode] + msg: String + numUids: Int +} + +type DeleteIntegratorPayload { + integrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + msg: String + numUids: Int +} + +type DeleteKeyValuePayload { + keyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + msg: String + numUids: Int +} + +type DeleteOrganizationPayload { + organization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + msg: String + numUids: Int +} + +type DeletePolicyDefinitionPayload { + policyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + msg: String + numUids: Int +} + +type DeletePolicyEnforcementPayload { + policyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + msg: String + numUids: Int +} + +type DeleteRBACPayload { + rBAC(filter: RBACFilter, first: Int, offset: Int): [RBAC] + msg: String + numUids: Int +} + +type DeleteRolePayload { + role(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + msg: String + numUids: Int +} + +type 
DeleteRunHistoryPayload { + runHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + msg: String + numUids: Int +} + +type DeleteSchemaVersionPayload { + schemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + msg: String + numUids: Int +} + +type DeleteSecurityIssuePayload { + securityIssue(filter: SecurityIssueFilter, order: SecurityIssueOrder, first: Int, offset: Int): [SecurityIssue] + msg: String + numUids: Int +} + +type DeleteSourceCodeToolPayload { + sourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + msg: String + numUids: Int +} + +type DeleteTagPayload { + tag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + msg: String + numUids: Int +} + +type DeleteTeamPayload { + team(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + msg: String + numUids: Int +} + +type DeleteToolsUsedPayload { + toolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + msg: String + numUids: Int +} + +type DeleteVulnerabilityPayload { + vulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + msg: String + numUids: Int +} + +"""DeploymentStage is an enum denoting the stage of the deployment. 
"""
+enum DeploymentStage {
+ """deployment is discovered from the events"""
+ discovered
+
+ """scanning is under process"""
+ scanning
+
+ """
+ deployment is known to have passed the deployment firewall and the deployment(ie the artifact) is live
+ """
+ current
+
+ """
+ deployment becomes a past deployment because another fresh deployment has happened
+ """
+ previous
+
+ """deployment is blocked by the firewall"""
+ blocked
+}
+
+input DeploymentStage_exact {
+ eq: DeploymentStage
+ in: [DeploymentStage]
+ le: DeploymentStage
+ lt: DeploymentStage
+ ge: DeploymentStage
+ gt: DeploymentStage
+ between: DeploymentStage
+}
+
+"""
+DeploymentTarget describes a single place that things can be deployed into,
+such as an AWS account or a Kubernetes cluster.
+"""
+type DeploymentTarget {
+ """id is randomly assigned"""
+ id: String!
+ name: String!
+
+ """this would be the ip/server address of the target environment"""
+ ip: String!
+ account: String
+
+ """this would be something like aws, gcp etc"""
+ targetType: String
+
+ """this would be something like us-east-1 etc"""
+ region: String
+ kubescapeServiceConnected: String
+ isFirewall: Boolean
+ organization(filter: OrganizationFilter): Organization!
+ defaultEnvironment(filter: EnvironmentFilter): Environment!
+} + +type DeploymentTargetAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + ipMin: String + ipMax: String + accountMin: String + accountMax: String + targetTypeMin: String + targetTypeMax: String + regionMin: String + regionMax: String + kubescapeServiceConnectedMin: String + kubescapeServiceConnectedMax: String +} + +input DeploymentTargetFilter { + id: StringHashFilter + name: StringExactFilter_StringRegExpFilter + ip: StringExactFilter + has: [DeploymentTargetHasFilter] + and: [DeploymentTargetFilter] + or: [DeploymentTargetFilter] + not: DeploymentTargetFilter +} + +enum DeploymentTargetHasFilter { + id + name + ip + account + targetType + region + kubescapeServiceConnected + isFirewall + organization + defaultEnvironment +} + +input DeploymentTargetOrder { + asc: DeploymentTargetOrderable + desc: DeploymentTargetOrderable + then: DeploymentTargetOrder +} + +enum DeploymentTargetOrderable { + id + name + ip + account + targetType + region + kubescapeServiceConnected +} + +input DeploymentTargetPatch { + name: String + + """this would be the ip/server address of the target environment""" + ip: String + account: String + + """this would be something like aws, gcp etc""" + targetType: String + + """this would be something like us-east-1 etc""" + region: String + kubescapeServiceConnected: String + isFirewall: Boolean + organization: OrganizationRef + defaultEnvironment: EnvironmentRef +} + +input DeploymentTargetRef { + """id is randomly assigned""" + id: String + name: String + + """this would be the ip/server address of the target environment""" + ip: String + account: String + + """this would be something like aws, gcp etc""" + targetType: String + + """this would be something like us-east-1 etc""" + region: String + kubescapeServiceConnected: String + isFirewall: Boolean + organization: OrganizationRef + defaultEnvironment: EnvironmentRef +} + +enum DgraphIndex { + int + int64 + float + bool + hash + exact + 
term + fulltext + trigram + regexp + year + month + day + hour + geo +} + +"""Environment can be things like dev, prod, staging etc.""" +type Environment { + id: String! + organization(filter: OrganizationFilter): Organization! + purpose: String! +} + +type EnvironmentAggregateResult { + count: Int + idMin: String + idMax: String + purposeMin: String + purposeMax: String +} + +input EnvironmentFilter { + id: StringHashFilter + purpose: StringExactFilter + has: [EnvironmentHasFilter] + and: [EnvironmentFilter] + or: [EnvironmentFilter] + not: EnvironmentFilter +} + +enum EnvironmentHasFilter { + id + organization + purpose +} + +input EnvironmentOrder { + asc: EnvironmentOrderable + desc: EnvironmentOrderable + then: EnvironmentOrder +} + +enum EnvironmentOrderable { + id + purpose +} + +input EnvironmentPatch { + organization: OrganizationRef + purpose: String +} + +input EnvironmentRef { + id: String + organization: OrganizationRef + purpose: String +} + +type FeatureMode { + id: String! + organization(filter: OrganizationFilter): Organization! + scan: String! + type: String! + enabled: Boolean! + category: String! + createdAt: DateTime! + updatedAt: DateTime! 
+} + +type FeatureModeAggregateResult { + count: Int + idMin: String + idMax: String + scanMin: String + scanMax: String + typeMin: String + typeMax: String + categoryMin: String + categoryMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input FeatureModeFilter { + id: StringHashFilter + scan: StringExactFilter + type: StringExactFilter + category: StringExactFilter + has: [FeatureModeHasFilter] + and: [FeatureModeFilter] + or: [FeatureModeFilter] + not: FeatureModeFilter +} + +enum FeatureModeHasFilter { + id + organization + scan + type + enabled + category + createdAt + updatedAt +} + +input FeatureModeOrder { + asc: FeatureModeOrderable + desc: FeatureModeOrderable + then: FeatureModeOrder +} + +enum FeatureModeOrderable { + id + scan + type + category + createdAt + updatedAt +} + +input FeatureModePatch { + organization: OrganizationRef + scan: String + type: String + enabled: Boolean + category: String + createdAt: DateTime + updatedAt: DateTime +} + +input FeatureModeRef { + id: String + organization: OrganizationRef + scan: String + type: String + enabled: Boolean + category: String + createdAt: DateTime + updatedAt: DateTime +} + +input FloatFilter { + eq: Float + in: [Float] + le: Float + lt: Float + ge: Float + gt: Float + between: FloatRange +} + +input FloatRange { + min: Float! + max: Float! +} + +input GenerateMutationParams { + add: Boolean + update: Boolean + delete: Boolean +} + +input GenerateQueryParams { + get: Boolean + query: Boolean + password: Boolean + aggregate: Boolean +} + +enum HTTPMethod { + GET + POST + PUT + PATCH + DELETE +} + +""" +The Int64 scalar type represents a signed 64‐bit numeric non‐fractional value. +Int64 can represent values in range [-(2^63),(2^63 - 1)]. +""" +scalar Int64 + +input Int64Filter { + eq: Int64 + in: [Int64] + le: Int64 + lt: Int64 + ge: Int64 + gt: Int64 + between: Int64Range +} + +input Int64Range { + min: Int64! + max: Int64! 
+} + +type Integrator { + id: String! + organization(filter: OrganizationFilter): Organization! + name: String! + type: String! + category: String! + credentials(filter: CredentialsFilter): Credentials! + createdAt: DateTime! + updatedAt: DateTime! +} + +type IntegratorAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + typeMin: String + typeMax: String + categoryMin: String + categoryMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input IntegratorFilter { + id: StringHashFilter + name: StringExactFilter + type: StringExactFilter + category: StringExactFilter + has: [IntegratorHasFilter] + and: [IntegratorFilter] + or: [IntegratorFilter] + not: IntegratorFilter +} + +enum IntegratorHasFilter { + id + organization + name + type + category + credentials + createdAt + updatedAt +} + +input IntegratorOrder { + asc: IntegratorOrderable + desc: IntegratorOrderable + then: IntegratorOrder +} + +enum IntegratorOrderable { + id + name + type + category + createdAt + updatedAt +} + +input IntegratorPatch { + organization: OrganizationRef + name: String + type: String + category: String + credentials: CredentialsRef + createdAt: DateTime + updatedAt: DateTime +} + +input IntegratorRef { + id: String + organization: OrganizationRef + name: String + type: String + category: String + credentials: CredentialsRef + createdAt: DateTime + updatedAt: DateTime +} + +input IntersectsFilter { + polygon: PolygonRef + multiPolygon: MultiPolygonRef +} + +input IntFilter { + eq: Int + in: [Int] + le: Int + lt: Int + ge: Int + gt: Int + between: IntRange +} + +input IntRange { + min: Int! + max: Int! +} + +""" +KeyValue is a generic key/value pair, used as an attribute list or similar. +""" +type KeyValue { + id: String! + name: String! + value: String! 
+} + +type KeyValueAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String + valueMin: String + valueMax: String +} + +input KeyValueFilter { + id: StringHashFilter + has: [KeyValueHasFilter] + and: [KeyValueFilter] + or: [KeyValueFilter] + not: KeyValueFilter +} + +enum KeyValueHasFilter { + id + name + value +} + +input KeyValueOrder { + asc: KeyValueOrderable + desc: KeyValueOrderable + then: KeyValueOrder +} + +enum KeyValueOrderable { + id + name + value +} + +input KeyValuePatch { + name: String + value: String +} + +input KeyValueRef { + id: String + name: String + value: String +} + +enum Mode { + BATCH + SINGLE +} + +type MultiPolygon { + polygons: [Polygon!]! +} + +input MultiPolygonRef { + polygons: [PolygonRef!]! +} + +type Mutation { + addSchemaVersion(input: [AddSchemaVersionInput!]!): AddSchemaVersionPayload + updateSchemaVersion(input: UpdateSchemaVersionInput!): UpdateSchemaVersionPayload + deleteSchemaVersion(filter: SchemaVersionFilter!): DeleteSchemaVersionPayload + updateRBAC(input: UpdateRBACInput!): UpdateRBACPayload + deleteRBAC(filter: RBACFilter!): DeleteRBACPayload + addRole(input: [AddRoleInput!]!, upsert: Boolean): AddRolePayload + updateRole(input: UpdateRoleInput!): UpdateRolePayload + deleteRole(filter: RoleFilter!): DeleteRolePayload + addKeyValue(input: [AddKeyValueInput!]!, upsert: Boolean): AddKeyValuePayload + updateKeyValue(input: UpdateKeyValueInput!): UpdateKeyValuePayload + deleteKeyValue(filter: KeyValueFilter!): DeleteKeyValuePayload + addOrganization(input: [AddOrganizationInput!]!, upsert: Boolean): AddOrganizationPayload + updateOrganization(input: UpdateOrganizationInput!): UpdateOrganizationPayload + deleteOrganization(filter: OrganizationFilter!): DeleteOrganizationPayload + addEnvironment(input: [AddEnvironmentInput!]!, upsert: Boolean): AddEnvironmentPayload + updateEnvironment(input: UpdateEnvironmentInput!): UpdateEnvironmentPayload + deleteEnvironment(filter: 
EnvironmentFilter!): DeleteEnvironmentPayload + addDeploymentTarget(input: [AddDeploymentTargetInput!]!, upsert: Boolean): AddDeploymentTargetPayload + updateDeploymentTarget(input: UpdateDeploymentTargetInput!): UpdateDeploymentTargetPayload + deleteDeploymentTarget(filter: DeploymentTargetFilter!): DeleteDeploymentTargetPayload + addTeam(input: [AddTeamInput!]!, upsert: Boolean): AddTeamPayload + updateTeam(input: UpdateTeamInput!): UpdateTeamPayload + deleteTeam(filter: TeamFilter!): DeleteTeamPayload + addApplication(input: [AddApplicationInput!]!, upsert: Boolean): AddApplicationPayload + updateApplication(input: UpdateApplicationInput!): UpdateApplicationPayload + deleteApplication(filter: ApplicationFilter!): DeleteApplicationPayload + addApplicationEnvironment(input: [AddApplicationEnvironmentInput!]!, upsert: Boolean): AddApplicationEnvironmentPayload + updateApplicationEnvironment(input: UpdateApplicationEnvironmentInput!): UpdateApplicationEnvironmentPayload + deleteApplicationEnvironment(filter: ApplicationEnvironmentFilter!): DeleteApplicationEnvironmentPayload + addApplicationRiskStatus(input: [AddApplicationRiskStatusInput!]!): AddApplicationRiskStatusPayload + updateApplicationRiskStatus(input: UpdateApplicationRiskStatusInput!): UpdateApplicationRiskStatusPayload + deleteApplicationRiskStatus(filter: ApplicationRiskStatusFilter!): DeleteApplicationRiskStatusPayload + addApplicationDeployment(input: [AddApplicationDeploymentInput!]!, upsert: Boolean): AddApplicationDeploymentPayload + updateApplicationDeployment(input: UpdateApplicationDeploymentInput!): UpdateApplicationDeploymentPayload + deleteApplicationDeployment(filter: ApplicationDeploymentFilter!): DeleteApplicationDeploymentPayload + addToolsUsed(input: [AddToolsUsedInput!]!): AddToolsUsedPayload + updateToolsUsed(input: UpdateToolsUsedInput!): UpdateToolsUsedPayload + deleteToolsUsed(filter: ToolsUsedFilter!): DeleteToolsUsedPayload + addApplicationDeploymentRisk(input: 
[AddApplicationDeploymentRiskInput!]!): AddApplicationDeploymentRiskPayload + updateApplicationDeploymentRisk(input: UpdateApplicationDeploymentRiskInput!): UpdateApplicationDeploymentRiskPayload + deleteApplicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter!): DeleteApplicationDeploymentRiskPayload + addIntegrator(input: [AddIntegratorInput!]!, upsert: Boolean): AddIntegratorPayload + updateIntegrator(input: UpdateIntegratorInput!): UpdateIntegratorPayload + deleteIntegrator(filter: IntegratorFilter!): DeleteIntegratorPayload + addCredentials(input: [AddCredentialsInput!]!): AddCredentialsPayload + updateCredentials(input: UpdateCredentialsInput!): UpdateCredentialsPayload + deleteCredentials(filter: CredentialsFilter!): DeleteCredentialsPayload + addFeatureMode(input: [AddFeatureModeInput!]!, upsert: Boolean): AddFeatureModePayload + updateFeatureMode(input: UpdateFeatureModeInput!): UpdateFeatureModePayload + deleteFeatureMode(filter: FeatureModeFilter!): DeleteFeatureModePayload + addTag(input: [AddTagInput!]!, upsert: Boolean): AddTagPayload + updateTag(input: UpdateTagInput!): UpdateTagPayload + deleteTag(filter: TagFilter!): DeleteTagPayload + addPolicyDefinition(input: [AddPolicyDefinitionInput!]!, upsert: Boolean): AddPolicyDefinitionPayload + updatePolicyDefinition(input: UpdatePolicyDefinitionInput!): UpdatePolicyDefinitionPayload + deletePolicyDefinition(filter: PolicyDefinitionFilter!): DeletePolicyDefinitionPayload + addPolicyEnforcement(input: [AddPolicyEnforcementInput!]!): AddPolicyEnforcementPayload + updatePolicyEnforcement(input: UpdatePolicyEnforcementInput!): UpdatePolicyEnforcementPayload + deletePolicyEnforcement(filter: PolicyEnforcementFilter!): DeletePolicyEnforcementPayload + addRunHistory(input: [AddRunHistoryInput!]!): AddRunHistoryPayload + updateRunHistory(input: UpdateRunHistoryInput!): UpdateRunHistoryPayload + deleteRunHistory(filter: RunHistoryFilter!): DeleteRunHistoryPayload + addSecurityIssue(input: 
[AddSecurityIssueInput!]!): AddSecurityIssuePayload + updateSecurityIssue(input: UpdateSecurityIssueInput!): UpdateSecurityIssuePayload + deleteSecurityIssue(filter: SecurityIssueFilter!): DeleteSecurityIssuePayload + addBuildTool(input: [AddBuildToolInput!]!, upsert: Boolean): AddBuildToolPayload + updateBuildTool(input: UpdateBuildToolInput!): UpdateBuildToolPayload + deleteBuildTool(filter: BuildToolFilter!): DeleteBuildToolPayload + addSourceCodeTool(input: [AddSourceCodeToolInput!]!, upsert: Boolean): AddSourceCodeToolPayload + updateSourceCodeTool(input: UpdateSourceCodeToolInput!): UpdateSourceCodeToolPayload + deleteSourceCodeTool(filter: SourceCodeToolFilter!): DeleteSourceCodeToolPayload + addCommitMetaData(input: [AddCommitMetaDataInput!]!): AddCommitMetaDataPayload + updateCommitMetaData(input: UpdateCommitMetaDataInput!): UpdateCommitMetaDataPayload + deleteCommitMetaData(filter: CommitMetaDataFilter!): DeleteCommitMetaDataPayload + addArtifact(input: [AddArtifactInput!]!, upsert: Boolean): AddArtifactPayload + updateArtifact(input: UpdateArtifactInput!): UpdateArtifactPayload + deleteArtifact(filter: ArtifactFilter!): DeleteArtifactPayload + addArtifactScanData(input: [AddArtifactScanDataInput!]!, upsert: Boolean): AddArtifactScanDataPayload + updateArtifactScanData(input: UpdateArtifactScanDataInput!): UpdateArtifactScanDataPayload + deleteArtifactScanData(filter: ArtifactScanDataFilter!): DeleteArtifactScanDataPayload + addComponent(input: [AddComponentInput!]!, upsert: Boolean): AddComponentPayload + updateComponent(input: UpdateComponentInput!): UpdateComponentPayload + deleteComponent(filter: ComponentFilter!): DeleteComponentPayload + addVulnerability(input: [AddVulnerabilityInput!]!, upsert: Boolean): AddVulnerabilityPayload + updateVulnerability(input: UpdateVulnerabilityInput!): UpdateVulnerabilityPayload + deleteVulnerability(filter: VulnerabilityFilter!): DeleteVulnerabilityPayload + addCWE(input: [AddCWEInput!]!, upsert: Boolean): 
AddCWEPayload + updateCWE(input: UpdateCWEInput!): UpdateCWEPayload + deleteCWE(filter: CWEFilter!): DeleteCWEPayload +} + +input NearFilter { + distance: Float! + coordinate: PointRef! +} + +type Organization implements RBAC { + """id is randomly assigned""" + id: String! + name: String! + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + teams(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team!] + environments(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget!] + policies(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition!] + policyEnforcements(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] + integrators(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator!] + featureModes(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode!] 
+ rolesAggregate(filter: RoleFilter): RoleAggregateResult + teamsAggregate(filter: TeamFilter): TeamAggregateResult + environmentsAggregate(filter: DeploymentTargetFilter): DeploymentTargetAggregateResult + policiesAggregate(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + policyEnforcementsAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult + integratorsAggregate(filter: IntegratorFilter): IntegratorAggregateResult + featureModesAggregate(filter: FeatureModeFilter): FeatureModeAggregateResult +} + +type OrganizationAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String +} + +input OrganizationFilter { + id: StringHashFilter + name: StringExactFilter + has: [OrganizationHasFilter] + and: [OrganizationFilter] + or: [OrganizationFilter] + not: OrganizationFilter +} + +enum OrganizationHasFilter { + id + name + roles + teams + environments + policies + policyEnforcements + integrators + featureModes +} + +input OrganizationOrder { + asc: OrganizationOrderable + desc: OrganizationOrderable + then: OrganizationOrder +} + +enum OrganizationOrderable { + id + name +} + +input OrganizationPatch { + name: String + roles: [RoleRef!] + teams: [TeamRef!] + environments: [DeploymentTargetRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + integrators: [IntegratorRef!] + featureModes: [FeatureModeRef!] +} + +input OrganizationRef { + """id is randomly assigned""" + id: String + name: String + roles: [RoleRef!] + teams: [TeamRef!] + environments: [DeploymentTargetRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] + integrators: [IntegratorRef!] + featureModes: [FeatureModeRef!] +} + +type Point { + longitude: Float! + latitude: Float! +} + +input PointGeoFilter { + near: NearFilter + within: WithinFilter +} + +type PointList { + points: [Point!]! +} + +input PointListRef { + points: [PointRef!]! 
+} + +input PointRef { + longitude: Float! + latitude: Float! +} + +type PolicyDefinition { + id: String! + ownerOrg(filter: OrganizationFilter): Organization! + ownerTeam(filter: TeamFilter): Team + ownerApplication(filter: ApplicationFilter): Application + createdAt: DateTime! + updatedAt: DateTime! + policyName: String! + category: String! + stage: String! + description: String! + scheduledPolicy: Boolean! + script: String! + variables: String + conditionName: String + suggestion: String +} + +type PolicyDefinitionAggregateResult { + count: Int + idMin: String + idMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime + policyNameMin: String + policyNameMax: String + categoryMin: String + categoryMax: String + stageMin: String + stageMax: String + descriptionMin: String + descriptionMax: String + scriptMin: String + scriptMax: String + variablesMin: String + variablesMax: String + conditionNameMin: String + conditionNameMax: String + suggestionMin: String + suggestionMax: String +} + +input PolicyDefinitionFilter { + id: StringHashFilter + policyName: StringExactFilter + category: StringExactFilter + stage: StringExactFilter + description: StringExactFilter + scheduledPolicy: Boolean + script: StringExactFilter + variables: StringExactFilter + conditionName: StringExactFilter + suggestion: StringExactFilter + has: [PolicyDefinitionHasFilter] + and: [PolicyDefinitionFilter] + or: [PolicyDefinitionFilter] + not: PolicyDefinitionFilter +} + +enum PolicyDefinitionHasFilter { + id + ownerOrg + ownerTeam + ownerApplication + createdAt + updatedAt + policyName + category + stage + description + scheduledPolicy + script + variables + conditionName + suggestion +} + +input PolicyDefinitionOrder { + asc: PolicyDefinitionOrderable + desc: PolicyDefinitionOrderable + then: PolicyDefinitionOrder +} + +enum PolicyDefinitionOrderable { + id + createdAt + updatedAt + policyName + category + stage + description + script + 
variables + conditionName + suggestion +} + +input PolicyDefinitionPatch { + ownerOrg: OrganizationRef + ownerTeam: TeamRef + ownerApplication: ApplicationRef + createdAt: DateTime + updatedAt: DateTime + policyName: String + category: String + stage: String + description: String + scheduledPolicy: Boolean + script: String + variables: String + conditionName: String + suggestion: String +} + +input PolicyDefinitionRef { + id: String + ownerOrg: OrganizationRef + ownerTeam: TeamRef + ownerApplication: ApplicationRef + createdAt: DateTime + updatedAt: DateTime + policyName: String + category: String + stage: String + description: String + scheduledPolicy: Boolean + script: String + variables: String + conditionName: String + suggestion: String +} + +type PolicyEnforcement { + id: ID! + policy(filter: PolicyDefinitionFilter): PolicyDefinition! + enforcedOrg(filter: OrganizationFilter): Organization + enforcedTeam(filter: TeamFilter): Team + enforcedApplication(filter: ApplicationFilter): Application + status: Boolean! + forceApply: Boolean + severity: Severity! + datasourceTool: String! + action: String! + conditionValue: String + environments(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment!] + tags(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag!] + createdAt: DateTime! + updatedAt: DateTime! + environmentsAggregate(filter: EnvironmentFilter): EnvironmentAggregateResult + tagsAggregate(filter: TagFilter): TagAggregateResult +} + +type PolicyEnforcementAggregateResult { + count: Int + datasourceToolMin: String + datasourceToolMax: String + actionMin: String + actionMax: String + conditionValueMin: String + conditionValueMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input PolicyEnforcementFilter { + id: [ID!] 
+ status: Boolean + forceApply: Boolean + datasourceTool: StringExactFilter + action: StringExactFilter + conditionValue: StringExactFilter + has: [PolicyEnforcementHasFilter] + and: [PolicyEnforcementFilter] + or: [PolicyEnforcementFilter] + not: PolicyEnforcementFilter +} + +enum PolicyEnforcementHasFilter { + policy + enforcedOrg + enforcedTeam + enforcedApplication + status + forceApply + severity + datasourceTool + action + conditionValue + environments + tags + createdAt + updatedAt +} + +input PolicyEnforcementOrder { + asc: PolicyEnforcementOrderable + desc: PolicyEnforcementOrderable + then: PolicyEnforcementOrder +} + +enum PolicyEnforcementOrderable { + datasourceTool + action + conditionValue + createdAt + updatedAt +} + +input PolicyEnforcementPatch { + policy: PolicyDefinitionRef + enforcedOrg: OrganizationRef + enforcedTeam: TeamRef + enforcedApplication: ApplicationRef + status: Boolean + forceApply: Boolean + severity: Severity + datasourceTool: String + action: String + conditionValue: String + environments: [EnvironmentRef!] + tags: [TagRef!] + createdAt: DateTime + updatedAt: DateTime +} + +input PolicyEnforcementRef { + id: ID + policy: PolicyDefinitionRef + enforcedOrg: OrganizationRef + enforcedTeam: TeamRef + enforcedApplication: ApplicationRef + status: Boolean + forceApply: Boolean + severity: Severity + datasourceTool: String + action: String + conditionValue: String + environments: [EnvironmentRef!] + tags: [TagRef!] + createdAt: DateTime + updatedAt: DateTime +} + +type Polygon { + coordinates: [PointList!]! +} + +input PolygonGeoFilter { + near: NearFilter + within: WithinFilter + contains: ContainsFilter + intersects: IntersectsFilter +} + +input PolygonRef { + coordinates: [PointListRef!]! 
+} + +type Query { + querySchemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + aggregateSchemaVersion(filter: SchemaVersionFilter): SchemaVersionAggregateResult + queryRBAC(filter: RBACFilter, first: Int, offset: Int): [RBAC] + aggregateRBAC(filter: RBACFilter): RBACAggregateResult + getRole(id: String!): Role + queryRole(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + aggregateRole(filter: RoleFilter): RoleAggregateResult + getKeyValue(id: String!): KeyValue + queryKeyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + aggregateKeyValue(filter: KeyValueFilter): KeyValueAggregateResult + getOrganization(id: String!): Organization + queryOrganization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + aggregateOrganization(filter: OrganizationFilter): OrganizationAggregateResult + getEnvironment(id: String!): Environment + queryEnvironment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + aggregateEnvironment(filter: EnvironmentFilter): EnvironmentAggregateResult + getDeploymentTarget(id: String!): DeploymentTarget + queryDeploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + aggregateDeploymentTarget(filter: DeploymentTargetFilter): DeploymentTargetAggregateResult + getTeam(id: String!): Team + queryTeam(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + aggregateTeam(filter: TeamFilter): TeamAggregateResult + getApplication(id: String!): Application + queryApplication(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + aggregateApplication(filter: ApplicationFilter): ApplicationAggregateResult + getApplicationEnvironment(id: String!): ApplicationEnvironment + queryApplicationEnvironment(filter: 
ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + aggregateApplicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironmentAggregateResult + getApplicationRiskStatus(id: ID!): ApplicationRiskStatus + queryApplicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + aggregateApplicationRiskStatus(filter: ApplicationRiskStatusFilter): ApplicationRiskStatusAggregateResult + getApplicationDeployment(id: String!): ApplicationDeployment + queryApplicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + aggregateApplicationDeployment(filter: ApplicationDeploymentFilter): ApplicationDeploymentAggregateResult + getToolsUsed(id: ID!): ToolsUsed + queryToolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + aggregateToolsUsed(filter: ToolsUsedFilter): ToolsUsedAggregateResult + getApplicationDeploymentRisk(id: ID!): ApplicationDeploymentRisk + queryApplicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + aggregateApplicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter): ApplicationDeploymentRiskAggregateResult + getIntegrator(id: String!): Integrator + queryIntegrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + aggregateIntegrator(filter: IntegratorFilter): IntegratorAggregateResult + getCredentials(id: ID!): Credentials + queryCredentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + aggregateCredentials(filter: CredentialsFilter): CredentialsAggregateResult + getFeatureMode(id: String!): FeatureMode + queryFeatureMode(filter: FeatureModeFilter, order: FeatureModeOrder, 
first: Int, offset: Int): [FeatureMode] + aggregateFeatureMode(filter: FeatureModeFilter): FeatureModeAggregateResult + getTag(id: String!): Tag + queryTag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + aggregateTag(filter: TagFilter): TagAggregateResult + getPolicyDefinition(id: String!): PolicyDefinition + queryPolicyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + aggregatePolicyDefinition(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + getPolicyEnforcement(id: ID!): PolicyEnforcement + queryPolicyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + aggregatePolicyEnforcement(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult + getRunHistory(id: ID!): RunHistory + queryRunHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + aggregateRunHistory(filter: RunHistoryFilter): RunHistoryAggregateResult + getSecurityIssue(id: ID!): SecurityIssue + querySecurityIssue(filter: SecurityIssueFilter, order: SecurityIssueOrder, first: Int, offset: Int): [SecurityIssue] + aggregateSecurityIssue(filter: SecurityIssueFilter): SecurityIssueAggregateResult + getBuildTool(id: String!): BuildTool + queryBuildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + aggregateBuildTool(filter: BuildToolFilter): BuildToolAggregateResult + getSourceCodeTool(id: String!): SourceCodeTool + querySourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + aggregateSourceCodeTool(filter: SourceCodeToolFilter): SourceCodeToolAggregateResult + getCommitMetaData(id: ID!): CommitMetaData + queryCommitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + aggregateCommitMetaData(filter: CommitMetaDataFilter): 
CommitMetaDataAggregateResult + getArtifact(id: String!): Artifact + queryArtifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + aggregateArtifact(filter: ArtifactFilter): ArtifactAggregateResult + getArtifactScanData(id: String!): ArtifactScanData + queryArtifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + aggregateArtifactScanData(filter: ArtifactScanDataFilter): ArtifactScanDataAggregateResult + getComponent(id: String!): Component + queryComponent(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + aggregateComponent(filter: ComponentFilter): ComponentAggregateResult + getVulnerability(id: String!): Vulnerability + queryVulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + aggregateVulnerability(filter: VulnerabilityFilter): VulnerabilityAggregateResult + getCWE(id: String!): CWE + queryCWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + aggregateCWE(filter: CWEFilter): CWEAggregateResult +} + +interface RBAC { + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + rolesAggregate(filter: RoleFilter): RoleAggregateResult +} + +type RBACAggregateResult { + count: Int +} + +input RBACFilter { + has: [RBACHasFilter] + and: [RBACFilter] + or: [RBACFilter] + not: RBACFilter +} + +enum RBACHasFilter { + roles +} + +input RBACPatch { + roles: [RoleRef!] +} + +""" +RiskStatus tells us what risk a current application instance or a deployment is at. 
+""" +enum RiskStatus { + lowrisk + mediumrisk + highrisk + apocalypserisk + scanning +} + +input RiskStatus_exact { + eq: RiskStatus + in: [RiskStatus] + le: RiskStatus + lt: RiskStatus + ge: RiskStatus + gt: RiskStatus + between: RiskStatus +} + +input RiskStatus_exact_StringRegExpFilter { + eq: RiskStatus + in: [RiskStatus] + le: RiskStatus + lt: RiskStatus + ge: RiskStatus + gt: RiskStatus + between: RiskStatus + regexp: String +} + +type Role { + """id is randomly assigned""" + id: String! + + """group should be a URI format that includes a scope or realm""" + group: String! + permission: RolePermission! +} + +type RoleAggregateResult { + count: Int + idMin: String + idMax: String + groupMin: String + groupMax: String +} + +input RoleFilter { + id: StringHashFilter + group: StringHashFilter + permission: RolePermission_hash + has: [RoleHasFilter] + and: [RoleFilter] + or: [RoleFilter] + not: RoleFilter +} + +enum RoleHasFilter { + id + group + permission +} + +input RoleOrder { + asc: RoleOrderable + desc: RoleOrderable + then: RoleOrder +} + +enum RoleOrderable { + id + group +} + +input RolePatch { + """group should be a URI format that includes a scope or realm""" + group: String + permission: RolePermission +} + +enum RolePermission { + admin + write + read +} + +input RolePermission_hash { + eq: RolePermission + in: [RolePermission] +} + +input RoleRef { + """id is randomly assigned""" + id: String + + """group should be a URI format that includes a scope or realm""" + group: String + permission: RolePermission +} + +type RunHistory { + id: ID! + policyId: String! + applicationDeployment(filter: ApplicationDeploymentFilter): ApplicationDeployment + artifactScan(filter: ArtifactScanDataFilter): ArtifactScanData + PolicyName: String! + Stage: String! + Artifact: String! + ArtifactTag: String! + ArtifactSha: String! + ArtifactNameTag: String! + DatasourceTool: String! + CreatedAt: DateTime! + UpdatedAt: DateTime! + DeployedAt: DateTime! 
+ Hash: String + Pass: Boolean! + MetaData: String + FileApi: String + scheduledPolicy: Boolean! + policyEnforcements(filter: PolicyEnforcementFilter): PolicyEnforcement! + securityIssue(filter: SecurityIssueFilter): SecurityIssue +} + +type RunHistoryAggregateResult { + count: Int + policyIdMin: String + policyIdMax: String + PolicyNameMin: String + PolicyNameMax: String + StageMin: String + StageMax: String + ArtifactMin: String + ArtifactMax: String + ArtifactTagMin: String + ArtifactTagMax: String + ArtifactShaMin: String + ArtifactShaMax: String + ArtifactNameTagMin: String + ArtifactNameTagMax: String + DatasourceToolMin: String + DatasourceToolMax: String + CreatedAtMin: DateTime + CreatedAtMax: DateTime + UpdatedAtMin: DateTime + UpdatedAtMax: DateTime + DeployedAtMin: DateTime + DeployedAtMax: DateTime + HashMin: String + HashMax: String + MetaDataMin: String + MetaDataMax: String + FileApiMin: String + FileApiMax: String +} + +input RunHistoryFilter { + id: [ID!] + policyId: StringExactFilter + PolicyName: StringExactFilter + Stage: StringExactFilter + Artifact: StringExactFilter + ArtifactTag: StringExactFilter + ArtifactSha: StringExactFilter + ArtifactNameTag: StringExactFilter_StringRegExpFilter + CreatedAt: DateTimeFilter + UpdatedAt: DateTimeFilter + DeployedAt: DateTimeFilter + Pass: Boolean + scheduledPolicy: Boolean + has: [RunHistoryHasFilter] + and: [RunHistoryFilter] + or: [RunHistoryFilter] + not: RunHistoryFilter +} + +enum RunHistoryHasFilter { + policyId + applicationDeployment + artifactScan + PolicyName + Stage + Artifact + ArtifactTag + ArtifactSha + ArtifactNameTag + DatasourceTool + CreatedAt + UpdatedAt + DeployedAt + Hash + Pass + MetaData + FileApi + scheduledPolicy + policyEnforcements + securityIssue +} + +input RunHistoryOrder { + asc: RunHistoryOrderable + desc: RunHistoryOrderable + then: RunHistoryOrder +} + +enum RunHistoryOrderable { + policyId + PolicyName + Stage + Artifact + ArtifactTag + ArtifactSha + ArtifactNameTag + 
DatasourceTool + CreatedAt + UpdatedAt + DeployedAt + Hash + MetaData + FileApi +} + +input RunHistoryPatch { + policyId: String + applicationDeployment: ApplicationDeploymentRef + artifactScan: ArtifactScanDataRef + PolicyName: String + Stage: String + Artifact: String + ArtifactTag: String + ArtifactSha: String + ArtifactNameTag: String + DatasourceTool: String + CreatedAt: DateTime + UpdatedAt: DateTime + DeployedAt: DateTime + Hash: String + Pass: Boolean + MetaData: String + FileApi: String + scheduledPolicy: Boolean + policyEnforcements: PolicyEnforcementRef + securityIssue: SecurityIssueRef +} + +input RunHistoryRef { + id: ID + policyId: String + applicationDeployment: ApplicationDeploymentRef + artifactScan: ArtifactScanDataRef + PolicyName: String + Stage: String + Artifact: String + ArtifactTag: String + ArtifactSha: String + ArtifactNameTag: String + DatasourceTool: String + CreatedAt: DateTime + UpdatedAt: DateTime + DeployedAt: DateTime + Hash: String + Pass: Boolean + MetaData: String + FileApi: String + scheduledPolicy: Boolean + policyEnforcements: PolicyEnforcementRef + securityIssue: SecurityIssueRef +} + +type SchemaVersion { + version: String! +} + +type SchemaVersionAggregateResult { + count: Int + versionMin: String + versionMax: String +} + +input SchemaVersionFilter { + has: [SchemaVersionHasFilter] + and: [SchemaVersionFilter] + or: [SchemaVersionFilter] + not: SchemaVersionFilter +} + +enum SchemaVersionHasFilter { + version +} + +input SchemaVersionOrder { + asc: SchemaVersionOrderable + desc: SchemaVersionOrderable + then: SchemaVersionOrder +} + +enum SchemaVersionOrderable { + version +} + +input SchemaVersionPatch { + version: String +} + +input SchemaVersionRef { + version: String +} + +type SecurityIssue { + id: ID! + AlertTitle: String + AlertMessage: String + Suggestions: String + Severity: Severity! + CreatedAt: DateTime! + UpdatedAt: DateTime! + Action: String! + JiraUrl: String + Status: String! 
+ Reason: String + Error: String + Affects(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory!] + AffectsAggregate(filter: RunHistoryFilter): RunHistoryAggregateResult +} + +type SecurityIssueAggregateResult { + count: Int + AlertTitleMin: String + AlertTitleMax: String + AlertMessageMin: String + AlertMessageMax: String + SuggestionsMin: String + SuggestionsMax: String + CreatedAtMin: DateTime + CreatedAtMax: DateTime + UpdatedAtMin: DateTime + UpdatedAtMax: DateTime + ActionMin: String + ActionMax: String + JiraUrlMin: String + JiraUrlMax: String + StatusMin: String + StatusMax: String + ReasonMin: String + ReasonMax: String + ErrorMin: String + ErrorMax: String +} + +input SecurityIssueFilter { + id: [ID!] + AlertTitle: StringExactFilter_StringRegExpFilter + AlertMessage: StringExactFilter + Suggestions: StringExactFilter + Severity: Severity_exact + CreatedAt: DateTimeFilter + UpdatedAt: DateTimeFilter + Action: StringExactFilter + Status: StringExactFilter + Reason: StringExactFilter + Error: StringExactFilter + has: [SecurityIssueHasFilter] + and: [SecurityIssueFilter] + or: [SecurityIssueFilter] + not: SecurityIssueFilter +} + +enum SecurityIssueHasFilter { + AlertTitle + AlertMessage + Suggestions + Severity + CreatedAt + UpdatedAt + Action + JiraUrl + Status + Reason + Error + Affects +} + +input SecurityIssueOrder { + asc: SecurityIssueOrderable + desc: SecurityIssueOrderable + then: SecurityIssueOrder +} + +enum SecurityIssueOrderable { + AlertTitle + AlertMessage + Suggestions + CreatedAt + UpdatedAt + Action + JiraUrl + Status + Reason + Error +} + +input SecurityIssuePatch { + AlertTitle: String + AlertMessage: String + Suggestions: String + Severity: Severity + CreatedAt: DateTime + UpdatedAt: DateTime + Action: String + JiraUrl: String + Status: String + Reason: String + Error: String + Affects: [RunHistoryRef!] 
+} + +input SecurityIssueRef { + id: ID + AlertTitle: String + AlertMessage: String + Suggestions: String + Severity: Severity + CreatedAt: DateTime + UpdatedAt: DateTime + Action: String + JiraUrl: String + Status: String + Reason: String + Error: String + Affects: [RunHistoryRef!] +} + +enum Severity { + critical + high + medium + low + info + none + unknown +} + +input Severity_exact { + eq: Severity + in: [Severity] + le: Severity + lt: Severity + ge: Severity + gt: Severity + between: Severity +} + +""" +SourceCodeTool contains the source details about the artifact that was built. +""" +type SourceCodeTool { + """id is randomly assigned""" + id: String! + createdAt: DateTime! + + """scm is the scm tool github/gitlab etc""" + scm: String! + + """repository is the git remote repository""" + repository: String! + + """branch is the git branch on which the artifact was built""" + branch: String! + + """headCommit is the checkout out head commit""" + headCommit: String + + """ + diffCommits is a comma separated string of the commits between the previous built artifact and the current + """ + diffCommits: String + licenseName: String + visibility: String + workflowName: String + + """parentRepo is populated in case the git repo is a fork""" + parentRepo: String + buildTool(filter: BuildToolFilter): BuildTool! 
+} + +type SourceCodeToolAggregateResult { + count: Int + idMin: String + idMax: String + createdAtMin: DateTime + createdAtMax: DateTime + scmMin: String + scmMax: String + repositoryMin: String + repositoryMax: String + branchMin: String + branchMax: String + headCommitMin: String + headCommitMax: String + diffCommitsMin: String + diffCommitsMax: String + licenseNameMin: String + licenseNameMax: String + visibilityMin: String + visibilityMax: String + workflowNameMin: String + workflowNameMax: String + parentRepoMin: String + parentRepoMax: String +} + +input SourceCodeToolFilter { + id: StringHashFilter + repository: StringExactFilter_StringRegExpFilter + has: [SourceCodeToolHasFilter] + and: [SourceCodeToolFilter] + or: [SourceCodeToolFilter] + not: SourceCodeToolFilter +} + +enum SourceCodeToolHasFilter { + id + createdAt + scm + repository + branch + headCommit + diffCommits + licenseName + visibility + workflowName + parentRepo + buildTool +} + +input SourceCodeToolOrder { + asc: SourceCodeToolOrderable + desc: SourceCodeToolOrderable + then: SourceCodeToolOrder +} + +enum SourceCodeToolOrderable { + id + createdAt + scm + repository + branch + headCommit + diffCommits + licenseName + visibility + workflowName + parentRepo +} + +input SourceCodeToolPatch { + createdAt: DateTime + + """scm is the scm tool github/gitlab etc""" + scm: String + + """repository is the git remote repository""" + repository: String + + """branch is the git branch on which the artifact was built""" + branch: String + + """headCommit is the checkout out head commit""" + headCommit: String + + """ + diffCommits is a comma separated string of the commits between the previous built artifact and the current + """ + diffCommits: String + licenseName: String + visibility: String + workflowName: String + + """parentRepo is populated in case the git repo is a fork""" + parentRepo: String + buildTool: BuildToolRef +} + +input SourceCodeToolRef { + """id is randomly assigned""" + id: String + 
createdAt: DateTime + + """scm is the scm tool github/gitlab etc""" + scm: String + + """repository is the git remote repository""" + repository: String + + """branch is the git branch on which the artifact was built""" + branch: String + + """headCommit is the checkout out head commit""" + headCommit: String + + """ + diffCommits is a comma separated string of the commits between the previous built artifact and the current + """ + diffCommits: String + licenseName: String + visibility: String + workflowName: String + + """parentRepo is populated in case the git repo is a fork""" + parentRepo: String + buildTool: BuildToolRef +} + +input StringExactFilter { + eq: String + in: [String] + le: String + lt: String + ge: String + gt: String + between: StringRange +} + +input StringExactFilter_StringRegExpFilter { + eq: String + in: [String] + le: String + lt: String + ge: String + gt: String + between: StringRange + regexp: String +} + +input StringFullTextFilter { + alloftext: String + anyoftext: String +} + +input StringHashFilter { + eq: String + in: [String] +} + +input StringRange { + min: String! + max: String! 
+} + +input StringRegExpFilter { + regexp: String +} + +input StringTermFilter { + allofterms: String + anyofterms: String +} + +type Subscription { + getOrganization(id: String!): Organization + queryOrganization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + aggregateOrganization(filter: OrganizationFilter): OrganizationAggregateResult + getTeam(id: String!): Team + queryTeam(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + aggregateTeam(filter: TeamFilter): TeamAggregateResult + getApplication(id: String!): Application + queryApplication(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + aggregateApplication(filter: ApplicationFilter): ApplicationAggregateResult + getApplicationEnvironment(id: String!): ApplicationEnvironment + queryApplicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + aggregateApplicationEnvironment(filter: ApplicationEnvironmentFilter): ApplicationEnvironmentAggregateResult +} + +""" +Tag tells us about the tags that are linked to policies and other components. +""" +type Tag { + id: String! + tagName: String! + tagValue: String! + tagDescription: String + createdBy: String + createdAt: DateTime! + updatedAt: DateTime! + policies(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] 
+ policiesAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult +} + +type TagAggregateResult { + count: Int + idMin: String + idMax: String + tagNameMin: String + tagNameMax: String + tagValueMin: String + tagValueMax: String + tagDescriptionMin: String + tagDescriptionMax: String + createdByMin: String + createdByMax: String + createdAtMin: DateTime + createdAtMax: DateTime + updatedAtMin: DateTime + updatedAtMax: DateTime +} + +input TagFilter { + id: StringExactFilter + tagName: StringExactFilter + tagValue: StringExactFilter + createdBy: StringExactFilter + has: [TagHasFilter] + and: [TagFilter] + or: [TagFilter] + not: TagFilter +} + +enum TagHasFilter { + id + tagName + tagValue + tagDescription + createdBy + createdAt + updatedAt + policies +} + +input TagOrder { + asc: TagOrderable + desc: TagOrderable + then: TagOrder +} + +enum TagOrderable { + id + tagName + tagValue + tagDescription + createdBy + createdAt + updatedAt +} + +input TagPatch { + tagName: String + tagValue: String + tagDescription: String + createdBy: String + createdAt: DateTime + updatedAt: DateTime + policies: [PolicyEnforcementRef!] +} + +input TagRef { + id: String + tagName: String + tagValue: String + tagDescription: String + createdBy: String + createdAt: DateTime + updatedAt: DateTime + policies: [PolicyEnforcementRef!] +} + +type Team implements RBAC { + """id is randomly assigned""" + id: String! + name: String! + roles(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role!] + organization(filter: OrganizationFilter): Organization! + applications(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application!] + labels(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue!] + policies(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition!] 
+ policyEnforcements(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement!] + rolesAggregate(filter: RoleFilter): RoleAggregateResult + applicationsAggregate(filter: ApplicationFilter): ApplicationAggregateResult + labelsAggregate(filter: KeyValueFilter): KeyValueAggregateResult + policiesAggregate(filter: PolicyDefinitionFilter): PolicyDefinitionAggregateResult + policyEnforcementsAggregate(filter: PolicyEnforcementFilter): PolicyEnforcementAggregateResult +} + +type TeamAggregateResult { + count: Int + idMin: String + idMax: String + nameMin: String + nameMax: String +} + +input TeamFilter { + id: StringHashFilter + name: StringExactFilter + has: [TeamHasFilter] + and: [TeamFilter] + or: [TeamFilter] + not: TeamFilter +} + +enum TeamHasFilter { + id + name + roles + organization + applications + labels + policies + policyEnforcements +} + +input TeamOrder { + asc: TeamOrderable + desc: TeamOrderable + then: TeamOrder +} + +enum TeamOrderable { + id + name +} + +input TeamPatch { + name: String + roles: [RoleRef!] + organization: OrganizationRef + applications: [ApplicationRef!] + labels: [KeyValueRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] +} + +input TeamRef { + """id is randomly assigned""" + id: String + name: String + roles: [RoleRef!] + organization: OrganizationRef + applications: [ApplicationRef!] + labels: [KeyValueRef!] + policies: [PolicyDefinitionRef!] + policyEnforcements: [PolicyEnforcementRef!] +} + +type ToolsUsed { + id: ID! + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +type ToolsUsedAggregateResult { + count: Int + sourceMin: String + sourceMax: String + buildMin: String + buildMax: String + artifactMin: String + artifactMax: String + deployMin: String + deployMax: String + sbomMin: String + sbomMax: String +} + +input ToolsUsedFilter { + id: [ID!] 
+ has: [ToolsUsedHasFilter] + and: [ToolsUsedFilter] + or: [ToolsUsedFilter] + not: ToolsUsedFilter +} + +enum ToolsUsedHasFilter { + source + build + artifact + deploy + sbom + misc +} + +input ToolsUsedOrder { + asc: ToolsUsedOrderable + desc: ToolsUsedOrderable + then: ToolsUsedOrder +} + +enum ToolsUsedOrderable { + source + build + artifact + deploy + sbom +} + +input ToolsUsedPatch { + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +input ToolsUsedRef { + id: ID + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +input UpdateApplicationDeploymentInput { + filter: ApplicationDeploymentFilter! + set: ApplicationDeploymentPatch + remove: ApplicationDeploymentPatch +} + +type UpdateApplicationDeploymentPayload { + applicationDeployment(filter: ApplicationDeploymentFilter, order: ApplicationDeploymentOrder, first: Int, offset: Int): [ApplicationDeployment] + numUids: Int +} + +input UpdateApplicationDeploymentRiskInput { + filter: ApplicationDeploymentRiskFilter! + set: ApplicationDeploymentRiskPatch + remove: ApplicationDeploymentRiskPatch +} + +type UpdateApplicationDeploymentRiskPayload { + applicationDeploymentRisk(filter: ApplicationDeploymentRiskFilter, order: ApplicationDeploymentRiskOrder, first: Int, offset: Int): [ApplicationDeploymentRisk] + numUids: Int +} + +input UpdateApplicationEnvironmentInput { + filter: ApplicationEnvironmentFilter! + set: ApplicationEnvironmentPatch + remove: ApplicationEnvironmentPatch +} + +type UpdateApplicationEnvironmentPayload { + applicationEnvironment(filter: ApplicationEnvironmentFilter, order: ApplicationEnvironmentOrder, first: Int, offset: Int): [ApplicationEnvironment] + numUids: Int +} + +input UpdateApplicationInput { + filter: ApplicationFilter! 
+ set: ApplicationPatch + remove: ApplicationPatch +} + +type UpdateApplicationPayload { + application(filter: ApplicationFilter, order: ApplicationOrder, first: Int, offset: Int): [Application] + numUids: Int +} + +input UpdateApplicationRiskStatusInput { + filter: ApplicationRiskStatusFilter! + set: ApplicationRiskStatusPatch + remove: ApplicationRiskStatusPatch +} + +type UpdateApplicationRiskStatusPayload { + applicationRiskStatus(filter: ApplicationRiskStatusFilter, order: ApplicationRiskStatusOrder, first: Int, offset: Int): [ApplicationRiskStatus] + numUids: Int +} + +input UpdateArtifactInput { + filter: ArtifactFilter! + set: ArtifactPatch + remove: ArtifactPatch +} + +type UpdateArtifactPayload { + artifact(filter: ArtifactFilter, order: ArtifactOrder, first: Int, offset: Int): [Artifact] + numUids: Int +} + +input UpdateArtifactScanDataInput { + filter: ArtifactScanDataFilter! + set: ArtifactScanDataPatch + remove: ArtifactScanDataPatch +} + +type UpdateArtifactScanDataPayload { + artifactScanData(filter: ArtifactScanDataFilter, order: ArtifactScanDataOrder, first: Int, offset: Int): [ArtifactScanData] + numUids: Int +} + +input UpdateBuildToolInput { + filter: BuildToolFilter! + set: BuildToolPatch + remove: BuildToolPatch +} + +type UpdateBuildToolPayload { + buildTool(filter: BuildToolFilter, order: BuildToolOrder, first: Int, offset: Int): [BuildTool] + numUids: Int +} + +input UpdateCommitMetaDataInput { + filter: CommitMetaDataFilter! + set: CommitMetaDataPatch + remove: CommitMetaDataPatch +} + +type UpdateCommitMetaDataPayload { + commitMetaData(filter: CommitMetaDataFilter, order: CommitMetaDataOrder, first: Int, offset: Int): [CommitMetaData] + numUids: Int +} + +input UpdateComponentInput { + filter: ComponentFilter! 
+ set: ComponentPatch + remove: ComponentPatch +} + +type UpdateComponentPayload { + component(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component] + numUids: Int +} + +input UpdateCredentialsInput { + filter: CredentialsFilter! + set: CredentialsPatch + remove: CredentialsPatch +} + +type UpdateCredentialsPayload { + credentials(filter: CredentialsFilter, order: CredentialsOrder, first: Int, offset: Int): [Credentials] + numUids: Int +} + +input UpdateCWEInput { + filter: CWEFilter! + set: CWEPatch + remove: CWEPatch +} + +type UpdateCWEPayload { + cWE(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE] + numUids: Int +} + +input UpdateDeploymentTargetInput { + filter: DeploymentTargetFilter! + set: DeploymentTargetPatch + remove: DeploymentTargetPatch +} + +type UpdateDeploymentTargetPayload { + deploymentTarget(filter: DeploymentTargetFilter, order: DeploymentTargetOrder, first: Int, offset: Int): [DeploymentTarget] + numUids: Int +} + +input UpdateEnvironmentInput { + filter: EnvironmentFilter! + set: EnvironmentPatch + remove: EnvironmentPatch +} + +type UpdateEnvironmentPayload { + environment(filter: EnvironmentFilter, order: EnvironmentOrder, first: Int, offset: Int): [Environment] + numUids: Int +} + +input UpdateFeatureModeInput { + filter: FeatureModeFilter! + set: FeatureModePatch + remove: FeatureModePatch +} + +type UpdateFeatureModePayload { + featureMode(filter: FeatureModeFilter, order: FeatureModeOrder, first: Int, offset: Int): [FeatureMode] + numUids: Int +} + +input UpdateIntegratorInput { + filter: IntegratorFilter! + set: IntegratorPatch + remove: IntegratorPatch +} + +type UpdateIntegratorPayload { + integrator(filter: IntegratorFilter, order: IntegratorOrder, first: Int, offset: Int): [Integrator] + numUids: Int +} + +input UpdateKeyValueInput { + filter: KeyValueFilter! 
+ set: KeyValuePatch + remove: KeyValuePatch +} + +type UpdateKeyValuePayload { + keyValue(filter: KeyValueFilter, order: KeyValueOrder, first: Int, offset: Int): [KeyValue] + numUids: Int +} + +input UpdateOrganizationInput { + filter: OrganizationFilter! + set: OrganizationPatch + remove: OrganizationPatch +} + +type UpdateOrganizationPayload { + organization(filter: OrganizationFilter, order: OrganizationOrder, first: Int, offset: Int): [Organization] + numUids: Int +} + +input UpdatePolicyDefinitionInput { + filter: PolicyDefinitionFilter! + set: PolicyDefinitionPatch + remove: PolicyDefinitionPatch +} + +type UpdatePolicyDefinitionPayload { + policyDefinition(filter: PolicyDefinitionFilter, order: PolicyDefinitionOrder, first: Int, offset: Int): [PolicyDefinition] + numUids: Int +} + +input UpdatePolicyEnforcementInput { + filter: PolicyEnforcementFilter! + set: PolicyEnforcementPatch + remove: PolicyEnforcementPatch +} + +type UpdatePolicyEnforcementPayload { + policyEnforcement(filter: PolicyEnforcementFilter, order: PolicyEnforcementOrder, first: Int, offset: Int): [PolicyEnforcement] + numUids: Int +} + +input UpdateRBACInput { + filter: RBACFilter! + set: RBACPatch + remove: RBACPatch +} + +type UpdateRBACPayload { + rBAC(filter: RBACFilter, first: Int, offset: Int): [RBAC] + numUids: Int +} + +input UpdateRoleInput { + filter: RoleFilter! + set: RolePatch + remove: RolePatch +} + +type UpdateRolePayload { + role(filter: RoleFilter, order: RoleOrder, first: Int, offset: Int): [Role] + numUids: Int +} + +input UpdateRunHistoryInput { + filter: RunHistoryFilter! + set: RunHistoryPatch + remove: RunHistoryPatch +} + +type UpdateRunHistoryPayload { + runHistory(filter: RunHistoryFilter, order: RunHistoryOrder, first: Int, offset: Int): [RunHistory] + numUids: Int +} + +input UpdateSchemaVersionInput { + filter: SchemaVersionFilter! 
+ set: SchemaVersionPatch + remove: SchemaVersionPatch +} + +type UpdateSchemaVersionPayload { + schemaVersion(filter: SchemaVersionFilter, order: SchemaVersionOrder, first: Int, offset: Int): [SchemaVersion] + numUids: Int +} + +input UpdateSecurityIssueInput { + filter: SecurityIssueFilter! + set: SecurityIssuePatch + remove: SecurityIssuePatch +} + +type UpdateSecurityIssuePayload { + securityIssue(filter: SecurityIssueFilter, order: SecurityIssueOrder, first: Int, offset: Int): [SecurityIssue] + numUids: Int +} + +input UpdateSourceCodeToolInput { + filter: SourceCodeToolFilter! + set: SourceCodeToolPatch + remove: SourceCodeToolPatch +} + +type UpdateSourceCodeToolPayload { + sourceCodeTool(filter: SourceCodeToolFilter, order: SourceCodeToolOrder, first: Int, offset: Int): [SourceCodeTool] + numUids: Int +} + +input UpdateTagInput { + filter: TagFilter! + set: TagPatch + remove: TagPatch +} + +type UpdateTagPayload { + tag(filter: TagFilter, order: TagOrder, first: Int, offset: Int): [Tag] + numUids: Int +} + +input UpdateTeamInput { + filter: TeamFilter! + set: TeamPatch + remove: TeamPatch +} + +type UpdateTeamPayload { + team(filter: TeamFilter, order: TeamOrder, first: Int, offset: Int): [Team] + numUids: Int +} + +input UpdateToolsUsedInput { + filter: ToolsUsedFilter! + set: ToolsUsedPatch + remove: ToolsUsedPatch +} + +type UpdateToolsUsedPayload { + toolsUsed(filter: ToolsUsedFilter, order: ToolsUsedOrder, first: Int, offset: Int): [ToolsUsed] + numUids: Int +} + +input UpdateVulnerabilityInput { + filter: VulnerabilityFilter! + set: VulnerabilityPatch + remove: VulnerabilityPatch +} + +type UpdateVulnerabilityPayload { + vulnerability(filter: VulnerabilityFilter, order: VulnerabilityOrder, first: Int, offset: Int): [Vulnerability] + numUids: Int +} + +type Vulnerability { + id: String! + parent: String! + ratings: Severity + cwes(filter: CWEFilter, order: CWEOrder, first: Int, offset: Int): [CWE!] 
+ summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: Float + priority: String + epss: Float + cisa_kev: String + affects(filter: ComponentFilter, order: ComponentOrder, first: Int, offset: Int): [Component!] + cwesAggregate(filter: CWEFilter): CWEAggregateResult + affectsAggregate(filter: ComponentFilter): ComponentAggregateResult +} + +type VulnerabilityAggregateResult { + count: Int + idMin: String + idMax: String + parentMin: String + parentMax: String + summaryMin: String + summaryMax: String + detailMin: String + detailMax: String + recommendationMin: String + recommendationMax: String + publishedMin: DateTime + publishedMax: DateTime + modifiedMin: DateTime + modifiedMax: DateTime + createdAtMin: DateTime + createdAtMax: DateTime + cvssMin: Float + cvssMax: Float + cvssSum: Float + cvssAvg: Float + priorityMin: String + priorityMax: String + epssMin: Float + epssMax: Float + epssSum: Float + epssAvg: Float + cisa_kevMin: String + cisa_kevMax: String +} + +input VulnerabilityFilter { + id: StringHashFilter + parent: StringExactFilter_StringRegExpFilter + ratings: Severity_exact + createdAt: DateTimeFilter + cvss: FloatFilter + priority: StringExactFilter_StringRegExpFilter + epss: FloatFilter + cisa_kev: StringExactFilter_StringRegExpFilter + has: [VulnerabilityHasFilter] + and: [VulnerabilityFilter] + or: [VulnerabilityFilter] + not: VulnerabilityFilter +} + +enum VulnerabilityHasFilter { + id + parent + ratings + cwes + summary + detail + recommendation + published + modified + createdAt + cvss + priority + epss + cisa_kev + affects +} + +input VulnerabilityOrder { + asc: VulnerabilityOrderable + desc: VulnerabilityOrderable + then: VulnerabilityOrder +} + +enum VulnerabilityOrderable { + id + parent + summary + detail + recommendation + published + modified + createdAt + cvss + priority + epss + cisa_kev +} + +input VulnerabilityPatch { + parent: String + ratings: Severity + 
cwes: [CWERef!] + summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: Float + priority: String + epss: Float + cisa_kev: String + affects: [ComponentRef!] +} + +input VulnerabilityRef { + id: String + parent: String + ratings: Severity + cwes: [CWERef!] + summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime + cvss: Float + priority: String + epss: Float + cisa_kev: String + affects: [ComponentRef!] +} + +input WithinFilter { + polygon: PolygonRef! +} + diff --git a/policies/scripts.go b/policies/scripts.go new file mode 100644 index 0000000..374e313 --- /dev/null +++ b/policies/scripts.go @@ -0,0 +1,20449 @@ +package policyingenstionscript + +var scriptMap = map[int]string{ + 1: ` + package opsmx + import future.keywords.in + + default allow = false + default private_repo = "" + + request_components = [input.metadata.ssd_secret.github.rest_api_url,"repos", input.metadata.owner, input.metadata.repository] + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.github.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + raw_body = response.raw_body + parsed_body = json.unmarshal(raw_body) + private_repo = response.body.private + + allow { + response.status_code = 200 + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "Unauthorized to check repository configuration due to Bad Credentials." + error := "401 Unauthorized." + sugg := "Kindly check the access token. It must have enough permissions to get repository configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "Repository not found while trying to fetch Repository Configuration." 
+ sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository configuration." + error := "Repo name or Organisation is incorrect." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "GitHub is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 301, 302] + not response.status_code in codes + msg := "Unable to fetch repository configuration." + error := sprintf("Error %v:%v receieved from Github upon trying to fetch Repository Configuration.", [response.status_code, response.body.message]) + sugg := "Kindly check Github API is reachable and the provided access token has required permissions." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + private_repo = false + msg := sprintf("Repository %v/%v is publically accessible.", [input.metadata.owner,input.metadata.repository]) + sugg := "Please change the repository visibility to private." 
+ error := "" + }`, + + 2: ` + package opsmx + import future.keywords.in + + default allow = false + + required_min_reviewers = {input.conditions[i].condition_value|input.conditions[i].condition_name == "Minimum Reviewers Policy"} + + request_components = [input.metadata.ssd_secret.github.rest_api_url,"repos", input.metadata.owner, input.metadata.repository,"branches",input.metadata.branch, "protection"] + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.github.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + raw_body = response.raw_body + parsed_body = json.unmarshal(raw_body) + reviewers = response.body.required_pull_request_reviews.required_approving_review_count + + allow { + response.status_code = 200 + } + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + error := "Unauthorized to check repository branch protection policy configuration due to Bad Credentials." + msg := "" + sugg := "Kindly check the access token. It must have enough permissions to get repository branch protection policy configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + error := "The branch protection policy for mentioned branch for Repository not found while trying to fetch repository branch protection policy configuration." + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository branch protection policy configuration." + msg := "" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "GitHub is not reachable." 
+ } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + not response.status_code in [401, 404, 500, 200, 301, 302] + msg := "" + error := sprintf("Error %v:%v receieved from Github upon trying to fetch repository branch protection policy configuration.", [response.status_code, response.body.message]) + sugg := "Kindly check Github API is reachable and the provided access token has required permissions." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + reviewers == 0 + msg := sprintf("The branch protection policy that mandates a pull request before merging has been deactivated for the %s branch of the %v on GitHub", [input.metadata.branch,input.metadata.repository]) + sugg := sprintf("Adhere to the company policy by establishing the correct minimum reviewers for %s Github repo", [input.metadata.repository]) + error := "" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + reviewers < required_min_reviewers + msg := sprintf("The branch protection policy that mandates a pull request before merging has mandatory reviewers count less than required for the %s branch of the %v on GitHub", [input.metadata.branch,input.metadata.repository]) + sugg := sprintf("Adhere to the company policy by establishing the correct minimum reviewers for %s Github repo", [input.metadata.repository]) + error := "" + }`, + + 3: ` + package opsmx + + default allow = false + + request_components = [input.metadata.ssd_secret.github.rest_api_url, "repos", input.metadata.github_org, input.metadata.github_repo,"branches",input.metadata.branch,"protection"] + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.github.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + raw_body = response.raw_body + parsed_body = json.unmarshal(raw_body) + obj := response.body + has_key(x, k) { + dont_care(x[k]) + 
} + dont_care(_) = true + default branch_protection = false + branch_protection = has_key(obj, "required_pull_request_reviews") + allow { + response.status_code = 200 + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code = 404 + msg := "" + sugg := "Kindly provide the accurate repository name, organization, and branch details" + error := sprintf("%v %v",[response.status_code,response.body.message]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code = 403 + msg := "" + sugg := sprintf("The repository %v is private,Make this repository public to enable this feature", [input.metadata.github_repo]) + error := sprintf("%v %v",[response.status_code,response.body.message]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code = 401 + msg := "" + sugg := "Please provide the Appropriate Git Token for the User" + error := sprintf("%s %v", [parsed_body.message,response.status]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code = 500 + msg := "Internal Server Error" + sugg := "" + error := "GitHub is not reachable" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + branch_protection != true + msg := sprintf("Github repo %v of branch %v is not protected", [input.metadata.github_repo, input.metadata.default_branch]) + sugg := sprintf("Adhere to the company policy by enforcing Code Owner Reviews for %s Github repo",[input.metadata.github_repo]) + error := "" + }`, + + 4: ` + package opsmx + + default allow = false + + request_components = [input.metadata.ssd_secret.github.rest_api_url, "repos", input.metadata.owner, input.metadata.repository,"branches",input.metadata.branch,"protection"] + request_url = concat("/", request_components) + + token = input.metadata.ssd_secret.github.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), 
+ }, + } + + response = http.send(request) + raw_body = response.raw_body + parsed_body = json.unmarshal(raw_body) + + allow { + response.status_code = 200 + } + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code = 404 + msg := "" + sugg := "Kindly provide the accurate repository name, organization, and branch details. Also, check if branch protection policy is configured." + error := sprintf("%v %v",[response.status_code,response.body.message]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code = 401 + msg := "" + sugg := "Please provide the Appropriate Git Token for the User" + error := sprintf("%s %v", [parsed_body.message,response.status]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code = 500 + msg := "Internal Server Error" + sugg := "" + error := "GitHub is not reachable" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.allow_deletions.enabled = true + msg := sprintf("Github repo %v is having policy and branch cannot be deleted", [input.metadata.repository]) + sugg := sprintf("Disable branch deletion in %s Github repo to align with the company policy", [input.metadata.repository]) + error := "" + }`, + + 5: ` + package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.github.rest_api_url,"repos", input.metadata.owner, input.metadata.repository, "branches", input.metadata.branch, "protection", "required_signatures"] + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.github.token + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + error := "Unauthorized to check repository branch configuration due to Bad 
Credentials." + msg := "" + sugg := "Kindly check the access token. It must have enough permissions to get repository branch configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + error := "The branch protection policy for mentioned branch for Repository not found while trying to fetch repository branch configuration." + sugg := "Kindly check if the repository and branch provided is correct and the access token has rights to read repository branch protection policy configuration. Also check if the branch protection policy is configured for this repository." + msg := "" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "GitHub is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v:%v receieved from Github upon trying to fetch repository branch configuration.", [response.status_code, response.body.message]) + sugg := "Kindly check Github API is reachable and the provided access token has required permissions." 
+ } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code in [200, 302] + response.body.enabled != true + msg := sprintf("Branch %v of Github Repository %v/%v does not have signed commits mandatory.", [input.metadata.branch, input.metadata.owner, input.metadata.repository]) + error := "" + sugg := sprintf("Adhere to the company policy by enforcing all commits to be signed for %v/%v Github repo", [input.metadata.owner, input.metadata.repository]) + }`, + + 6: ` + package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.github.rest_api_url,"orgs", input.metadata.owner] + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.github.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + raw_body = response.raw_body + parsed_body = json.unmarshal(raw_body) + mfa_enabled = response.body.two_factor_requirement_enabled + + allow { + response.status_code = 200 + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + error := "Unauthorized to check organisation configuration due to Bad Credentials." + msg := "" + sugg := "Kindly check the access token. It must have enough permissions to get organisation configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + error := "Mentioned Organisation not found while trying to fetch org configuration. The repository does not belong to an organisation." + sugg := "Kindly check if the organisation provided is correct and the access token has rights to read organisation configuration.Also, verify if the repository belongs to an organisation." + msg := "" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." 
+ sugg := "" + error := "GitHub is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v:%v receieved from Github upon trying to fetch organisation configuration.", [response.status_code, response.body.message]) + sugg := "Kindly check Github API is reachable and the provided access token has required permissions." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + mfa_enabled == null + msg := sprintf("Github Organisation %v doesnt have the mfa enabled.", [input.metadata.owner]) + sugg := sprintf("Adhere to the company policy by enabling 2FA for %s.",[input.metadata.owner]) + error := "" + }`, + + 7: ` + package opsmx + severities = ["LOW"] + vuln_id = input.conditions[0].condition_value + vuln_severity = {input.conditions[i].condition_value | input.conditions[i].condition_name = "severity"} + deny[msg]{ + some i + inputSeverity = severities[i] + some j + vuln_severity[j] == inputSeverity + msg:= sprintf("%v Criticality Vulnerability : %v found in component: %v", [inputSeverity, vuln_id, input.metadata.package_name]) + }`, + + 8: ` + package opsmx + severities = ["CRITICAL"] + vuln_id = input.conditions[0].condition_value + vuln_severity = {input.conditions[i].condition_value | input.conditions[i].condition_name = "severity"} + deny[msg]{ + some i + inputSeverity = severities[i] + some j + vuln_severity[j] == inputSeverity + msg:= sprintf("%v Criticality Vulnerability : %v found in component: %v", [inputSeverity, vuln_id, input.metadata.package_name]) + } + `, + + 9: ` + package opsmx + severities = ["MODERATE","UNDEFINED","MEDIUM","UNKNOWN"] + vuln_id = input.conditions[0].condition_value + vuln_severity = {input.conditions[i].condition_value | input.conditions[i].condition_name = "severity"} + deny[msg]{ + some i + inputSeverity = severities[i] + some j + vuln_severity[j] == inputSeverity + msg:= 
sprintf("%v Criticality Vulnerability : %v found in component: %v", [inputSeverity, vuln_id, input.metadata.package_name]) + } `, + + 10: ` + package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.github.rest_api_url,"orgs", input.metadata.owner, "actions", "permissions", "workflow"] + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.github.token + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "Unauthorized to check Organisation Workflow Permissions." + error := "401 Unauthorized." + sugg := "Kindly check the access token. It must have enough permissions to get organisation workflow permissions." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "Mentioned Organisation not found while trying to fetch organisation workflow permissions." + sugg := "Kindly check if the organisation provided is correct." + error := "Organisation name is incorrect." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "GitHub is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "Unable to fetch organisation workflow permissions." + error := sprintf("Error %v:%v receieved from Github upon trying to fetch organisation workflow permissions.", [response.status_code, response.body.message]) + sugg := "Kindly check Github API is reachable and the provided access token has required permissions." 
+ } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.body.default_workflow_permissions != "read" + msg := sprintf("Default workflow permissions for Organisation %v is not set to read.", [input.metadata.owner]) + sugg := sprintf("Adhere to the company policy by enforcing default_workflow_permissions of Organisation %s to read only.", [input.metadata.owner]) + error := "" + }`, + + 11: ` + package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.github.rest_api_url,"repos", input.metadata.owner, input.metadata.repository, "actions", "permissions", "workflow"] + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.github.token + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "Unauthorized to check Repository Workflow Permissions." + error := "401 Unauthorized." + sugg := "Kindly check the access token. It must have enough permissions to get repository workflow permissions." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "Mentioned Repository not found while trying to fetch repository workflow permissions." + sugg := "Kindly check if the repository provided is correct." + error := "Repository name is incorrect." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "GitHub is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "Unable to fetch repository workflow permissions." 
+ error := sprintf("Error %v:%v receieved from Github upon trying to fetch repository workflow permissions.", [response.status_code, response.body.message]) + sugg := "Kindly check Github API is reachable and the provided access token has required permissions." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.body.default_workflow_permissions != "read" + msg := sprintf("Default workflow permissions for Repository %v/%v is not set to read.", [input.metadata.owner, input.metadata.repository]) + sugg := sprintf("Adhere to the company policy by enforcing default_workflow_permissions of Repository %v/%v to read only.", [input.metadata.owner, input.metadata.repository]) + error := "" + }`, + + 12: ` + package opsmx + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + input.metadata.build_image_sha == "" + msg = "" + sugg = "Ensure that build platform is integrated with SSD." + error = "Complete Build Artifact information could not be identified." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + input.metadata.image_sha == "" + msg = "" + sugg = "Ensure that deployment platform is integrated with SSD usin Admission Controller." + error = "Artifact information could not be identified from Deployment Environment." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + input.metadata.image_sha != input.metadata.build_image_sha + + msg = sprintf("Non-identical by hash artifacts identified at Build stage and Deployment Environment.\nBuild Image: %v:%v \n Deployed Image: %v:%v", [input.metadata.build_image, input.metadata.build_image_tag, input.metadata.image, input.metadata.image_tag]) + sugg = "Ensure that built image details & deployed Image details match. Check for possible misconfigurations." 
+ error = "" + }`, + + 13: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", "/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." 
+ } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 14: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", "/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. 
Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 15: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", 
"/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." 
+ } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 16: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", "/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. 
Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 17: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", 
"/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." 
+ } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 18: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", "/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. 
Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 19: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", 
"/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." 
+ } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 20: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", "/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. 
Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 21: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", 
"/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." 
+ } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 22: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", "/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. 
Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 23: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", 
"/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." 
+ } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 24: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", "/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. 
Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 25: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", 
"/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." 
+ } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 26: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", "/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. 
Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 27: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", 
"/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." 
+ } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 28: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", "/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. 
Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 29: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", 
"/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." 
+ } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 30: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", "/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. 
Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 31: ` + package opsmx + import future.keywords.in + + openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id]) + openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"]) + + policy_name = input.conditions[0].condition_name + check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "") + + check_name = replace(lower(check_orig), " ", "-") + threshold = to_number(input.conditions[0].condition_value) + request_url = concat("",[input.metadata.toolchain_addr, "api", "/v1", 
"/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"]) + + request = { + "method": "GET", + "url": request_url, + } + + response = http.send(request) + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.code == 404 + msg := "" + sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. Kindly enable related features and integrations.", [policy_name]) + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." + error := sprintf("Error Received: %v.",[response.body.error]) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved: %v", [response.body.error]) + sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled." 
+ } + + default in_range = false + + isNumberBetweenTwoNumbers(num, lower, upper) { + num >= lower + num <= upper + } + + in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + in_range == true + response.body.score < threshold + + documentation := response.body.documentationUrl + msg := sprintf("%v score for repo %v/%v is %v, which is less than 5 out 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score]) + sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation]) + error := "" + }`, + + 32: ` + package opsmx + import future.keywords.in + + default allow = false + + outside_collaborators_url = concat("/", [input.metadata.ssd_secret.github.rest_api_url, "repos", input.metadata.owner, input.metadata.repository, "collaborators?affiliation=outside&per_page=100"]) + + request = { + "method": "GET", + "url": outside_collaborators_url, + "headers": { + "Authorization": sprintf("Bearer %v", [input.metadata.ssd_secret.github.token]), + }, + } + + default response = "" + response = http.send(request) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "" + error := "401 Unauthorized: Unauthorized to check repository collaborators." + sugg := "Kindly check the access token. It must have enough permissions to get repository collaborators." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository collaborators." + error := "Mentioned branch for Repository not found while trying to fetch repository collaborators. Repo name or Organisation is incorrect." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." 
+ sugg := "" + error := "GitHub is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 301, 302] + not response.status_code in codes + msg := "" + error := sprintf("Unable to fetch repository collaborators. Error %v:%v receieved from Github.", [response.status_code, response.body.message]) + sugg := "Kindly check Github API is reachable and the provided access token has required permissions." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code in [200, 301, 302] + count(response.body) > 0 + + collaborators_list = concat(",\n", [response.body[i].login | response.body[i].type == "User"]) + msg := sprintf("%v outside collaborators have access to repository. \n The list of outside collaborators is: %v.", [count(response.body), collaborators_list]) + sugg := "Adhere to the company policy by revoking the access of non-organization members for Github repo." + error := "" + }`, + + 33: ` + package opsmx + import future.keywords.in + + request_url = concat("/", [input.metadata.ssd_secret.github.rest_api_url,"repos", input.metadata.owner, input.metadata.repository, "collaborators?affiliation=admin"]) + + token = input.metadata.ssd_secret.github.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + + response = http.send(request) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "" + error := "401 Unauthorized: Unauthorized to check repository collaborators." + sugg := "Kindly check the access token. It must have enough permissions to get repository collaborators." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository collaborators." 
+ error := "Mentioned branch for Repository not found while trying to fetch repository collaborators. Repo name or Organisation is incorrect." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "GitHub is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 301, 302] + not response.status_code in codes + msg := "" + error := sprintf("Unable to fetch repository collaborators. Error %v:%v receieved from Github.", [response.status_code, response.body.message]) + sugg := "Kindly check Github API is reachable and the provided access token has required permissions." + } + + default denial_list = false + + denial_list = matched_users + + matched_users[user] { + users := [response.body[i].login | response.body[i].type == "User"] + user := users[_] + patterns := ["bot", "auto", "test", "jenkins", "drone", "github", "gitlab", "aws", "azure"] + some pattern in patterns + regex.match(pattern, user) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}] { + counter := count(denial_list) + counter > 0 + denial_list_str := concat(", ", denial_list) + msg := sprintf("Owner access of Github Repository is granted to bot users. Number of bot users having owner access: %v. 
Name of bots having owner access: %v", [counter, denial_list_str]) + sugg := sprintf("Adhere to the company policy and revoke access of bot user for %v/%v Repository.", [input.metadata.repository,input.metadata.owner]) + error := "" + }`, + + 34: ` + package opsmx + import future.keywords.in + + request_url = concat("/", [input.metadata.ssd_secret.github.rest_api_url, "orgs", input.metadata.owner, "members?role=admin"]) + + token = input.metadata.ssd_secret.github.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + + response = http.send(request) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "" + error := "401 Unauthorized: Unauthorized to check organisation members." + sugg := "Kindly check the access token. It must have enough permissions to get organisation members." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + sugg := "Kindly check if the repository provided is correct and the access token has rights to read organisation members. Also check if the repository belongs to an organization." + error := "Mentioned branch for Repository not found while trying to fetch organisation members. Either Organisation/Repository name is incorrect or the repository does not belong to an organization." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "GitHub is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 301, 302] + not response.status_code in codes + msg := "" + error := sprintf("Unable to fetch organisation members. 
Error %v:%v receieved from Github.", [response.status_code, response.body.message]) + sugg := "Kindly check Github API is reachable and the provided access token has required permissions." + } + + default denial_list = false + + denial_list = matched_users + + matched_users[user] { + users := [response.body[i].login | response.body[i].type == "User"] + user := users[_] + patterns := ["bot", "auto", "test", "jenkins", "drone", "github", "gitlab", "aws", "azure"] + some pattern in patterns + regex.match(pattern, user) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}] { + counter := count(denial_list) + counter > 0 + denial_list_str := concat(", ", denial_list) + msg := sprintf("Owner access of Github Organization is granted to bot users. Number of bot users having owner access: %v. Name of bots having owner access: %v", [counter, denial_list_str]) + sugg := sprintf("Adhere to the company policy and revoke access of bot user for %v Organization.", [input.metadata.owner]) + error := "" + }`, + + 35: ` + package opsmx + import future.keywords.in + + default allow = false + default active_hooks = [] + default active_hooks_count = 0 + default hooks_with_secret = [] + default hooks_with_secret_count = 0 + + request_url = concat("/",[input.metadata.ssd_secret.github.rest_api_url,"repos", input.metadata.owner, input.metadata.repository, "hooks"]) + token = input.metadata.ssd_secret.github.token + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + active_hooks = [response.body[i].config | response.body[i].active == true] + hooks_with_secret = [response.body[i].config.secret | response.body[i].active == true] + + allow { + response.status_code = 200 + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "" + error := "401 Unauthorized: Unauthorized to check repository webhook configuration due 
to Bad Credentials." + sugg := "Kindly check the access token. It must have enough permissions to get repository webhook configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository webhook configuration." + error := "Mentioned branch for Repository not found while trying to fetch repository webhook configuration. Repo name or Organisation is incorrect." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "GitHub is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 301, 302] + not response.status_code in codes + msg := "" + error := sprintf("Unable to fetch repository webhook configuration. Error %v:%v receieved from Github upon trying to fetch repository webhook configuration.", [response.status_code, response.body.message]) + sugg := "Kindly check Github API is reachable and the provided access token has required permissions." 
+ } + + active_hooks_count = count(active_hooks) + hooks_with_secret_count = count(hooks_with_secret) + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + active_hooks_count != 0 + + active_hooks_count > hooks_with_secret_count + msg := sprintf("Webhook authentication failed: Secret not set for %v/%v repository.", [input.metadata.owner, input.metadata.repository]) + sugg := sprintf("Adhere to the company policy by configuring the webhook secret for %v/%v repository.", [input.metadata.owner, input.metadata.repository]) + error := "" + }`, + + 36: ` + package opsmx + import future.keywords.in + + default allow = false + default active_hooks = [] + default active_hooks_count = 0 + default insecure_active_hooks = [] + default insecure_active_hooks_count = 0 + + request_url = concat("/",[input.metadata.ssd_secret.github.rest_api_url,"repos", input.metadata.owner, input.metadata.repository, "hooks"]) + token = input.metadata.ssd_secret.github.token + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + active_hooks = [response.body[i].config | response.body[i].active == true] + insecure_active_hooks = [active_hooks[j].url | active_hooks[j].insecure_ssl == "1"] + + allow { + response.status_code = 200 + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "" + error := "401 Unauthorized: Unauthorized to check repository webhook configuration due to Bad Credentials." + sugg := "Kindly check the access token. It must have enough permissions to get repository webhook configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository webhook configuration." 
+ error := "Mentioned branch for Repository not found while trying to fetch repository webhook configuration. Repo name or Organisation is incorrect." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "GitHub is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 301, 302] + not response.status_code in codes + msg := "" + error := sprintf("Unable to fetch repository webhook configuration. Error %v:%v receieved from Github upon trying to fetch repository webhook configuration.", [response.status_code, response.body.message]) + sugg := "Kindly check Github API is reachable and the provided access token has required permissions." + } + + active_hooks_count = count(active_hooks) + insecure_active_hooks_count = count(insecure_active_hooks) + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + active_hooks_count > 0 + insecure_active_hooks_count > 0 + + msg := sprintf("Webhook SSL Check failed: SSL/TLS not enabled for %v/%v repository.", [input.metadata.owner, input.metadata.repository]) + sugg := sprintf("Adhere to the company policy by enabling the webhook ssl/tls for %v/%v repository.", [input.metadata.owner, input.metadata.repository]) + error := "" + }`, + + 37: ` + package opsmx + import future.keywords.in + default approved_servers_count = 0 + approved_servers_count = count(input.metadata.ssd_secret.build_access_config.credentials) + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error }] { + approved_servers_count == 0 + msg:="" + sugg:="Set the BuildAccessConfig.Credentials parameter with trusted build server URLs to strengthen artifact validation during the deployment process." 
+ error:="The essential list of approved build URLs remains unspecified" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error }]{ + count(input.metadata.ssd_secret.build_access_config.credentials) > 0 + build_url = split(input.metadata.build_url, "/")[2] + list_of_approved_servers = [split(input.metadata.ssd_secret.build_access_config.credentials[i].url, "/")[2] |input.metadata.ssd_secret.build_access_config.credentials[i].url != ""] + + not build_url in list_of_approved_servers + msg:=sprintf("The artifact has not been sourced from an approved build server.\nPlease verify the artifacts origin against the following approved build URLs: %v", [concat(",", list_of_approved_servers)]) + sugg:="Ensure the artifact is sourced from an approved build server." + error:="" + }`, + + 38: ` + package opsmx + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + input.metadata.build_image_sha == "" + msg = "" + sugg = "Ensure that build platform is integrated with SSD." + error = "Complete Build Artifact information could not be identified." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + input.metadata.image_sha == "" + msg = "" + sugg = "Ensure that deployment platform is integrated with SSD usin Admission Controller." + error = "Artifact information could not be identified from Deployment Environment." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + input.metadata.image_sha != input.metadata.build_image_sha + + msg = sprintf("Non-identical by hash artifacts identified at Build stage and Deployment Environment.\nBuild Image: %v:%v \n Deployed Image: %v:%v", [input.metadata.build_image, input.metadata.build_image_tag, input.metadata.image, input.metadata.image_tag]) + sugg = "Ensure that built image details & deployed Image details match. Check for possible misconfigurations." 
+ error = "" + }`, + 39: ` + package opsmx + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + input.metadata.build_image_sha == "" + msg = "" + sugg = "Ensure that build platform is integrated with SSD." + error = "Complete Build Artifact information could not be identified." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + input.metadata.image_sha == "" + msg = "" + sugg = "Ensure that deployment platform is integrated with SSD usin Admission Controller." + error = "Artifact information could not be identified from Deployment Environment." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + input.metadata.image_sha != input.metadata.build_image_sha + + msg = sprintf("Non-identical by hash artifacts identified at Build stage and Deployment Environment.\nBuild Image: %v:%v \n Deployed Image: %v:%v", [input.metadata.build_image, input.metadata.build_image_tag, input.metadata.image, input.metadata.image_tag]) + sugg = "Ensure that built image details & deployed Image details match. Check for possible misconfigurations." + error = "" + }`, + + 40: ` + package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.github.rest_api_url,"repos", input.metadata.owner, input.metadata.repository] + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.github.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + license_url = response.body.license.url + + allow { + response.status_code = 200 + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "Unauthorized to check repository configuration due to Bad Credentials." + error := "401 Unauthorized." + sugg := "Kindly check the access token. It must have enough permissions to get repository configurations." 
+ } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "Repository not found while trying to fetch Repository Configuration." + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository configuration." + error := "Repo name or Organisation is incorrect." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "GitHub is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 301, 302] + not response.status_code in codes + msg := "Unable to fetch repository configuration." + error := sprintf("Error %v:%v receieved from Github upon trying to fetch Repository Configuration.", [response.status_code, response.body.message]) + sugg := "Kindly check Github API is reachable and the provided access token has required permissions." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + license_url == null + msg := sprintf("GitHub License not found for the %v/%v repository.", [input.metadata.owner, input.metadata.repository]) + sugg := sprintf("Adhere to the company policy by adding a License file for %v/%v repository.", [input.metadata.owner, input.metadata.repository]) + error := "" + }`, + + 41: ` + package opsmx + import future.keywords.in + + default approved_artifact_repos = [] + default image_source = "" + + image_details = split(input.metadata.image,"/") + + image_source = concat("/",["docker.io", image_details[0]]) { + count(image_details) <= 2 + not contains(image_details[0], ".") + } + + image_source = concat("/",[image_details[0], image_details[1]]) { + count(image_details) == 2 + contains(image_details[0], ".") + } + + image_source = concat("/",[image_details[0], image_details[1]]) { + count(image_details) == 3 + } + + approved_artifact_repos = 
split(input.metadata.ssd_secret.authorized_artifact_repo, ",")

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	count(approved_artifact_repos) == 0
	error := "The essential list of Authorized Artifact Repositories remains unspecified."
	sugg := "Set the AuthorizedArtifactRepos parameter with trusted Artifact Repo to strengthen artifact validation during the deployment process."
	msg := ""
	}

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	not image_source in approved_artifact_repos

	msg := sprintf("The artifact %v:%v has not been sourced from an authorized artifact repo.\nPlease verify the artifacts origin against the following Authorized Artifact Repositories: %v", [input.metadata.image, input.metadata.image_tag, input.metadata.ssd_secret.authorized_artifact_repo])
	sugg := "Ensure the artifact is sourced from an authorized artifact repo."
	error := ""
	}`,

	42: `
	package opsmx
	import future.keywords.in

	# Locate the pre-computed OpenSSF Scorecard result file for this build.
	openssf_results_file = concat("_", [input.metadata.owner, input.metadata.repository, input.metadata.build_id])
	openssf_results_file_complete = concat("", [openssf_results_file, "_scorecard.json"])

	# Derive the scorecard check name from the policy's condition name,
	# e.g. "Open SSF Branch Protection Policy" -> "branch-protection".
	policy_name = input.conditions[0].condition_name
	check_orig = replace(replace(policy_name, "Open SSF ", ""), " Policy", "")

	check_name = replace(lower(check_orig), " ", "-")
	threshold = to_number(input.conditions[0].condition_value)
	request_url = concat("",[input.metadata.toolchain_addr, "/api", "/v1", "/openssfScore?scoreCardName=", openssf_results_file_complete, "&", "checkName=", check_name, "&", "scanOperation=", "openssfScan"])

	request = {
		"method": "GET",
		"url": request_url,
	}

	response = http.send(request)

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	response.body.code == 404
	msg := ""
	sugg := sprintf("Results for %v check could not be obtained. Suggests incompatibility between the check and repository. Kindly enable related features and integrations.", [policy_name])
	error := sprintf("Error Received: %v.",[response.body.error])
	}

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	response.status_code == 500
	msg := ""
	sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled."
	error := sprintf("Error Received: %v.",[response.body.error])
	}

	deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{
	codes = [401, 404, 500, 200, 302]
	not response.status_code in codes
	msg := ""
	# FIX: the format string had two verbs but only one argument, which made
	# sprintf (and hence this rule) fail; supply the status code as well.
	error := sprintf("Error %v received: %v", [response.status_code, response.body.error])
	sugg := "Kindly check if toolchain service is available in SSD environment and OpenSSF integration Policies are enabled."
	}

	default in_range = false

	isNumberBetweenTwoNumbers(num, lower, upper) {
	num >= lower
	num <= upper
	}

	# Scorecard scores are defined on the closed range [0, 10].
	in_range = isNumberBetweenTwoNumbers(response.body.score, 0, 10)

	deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{
	in_range == true
	response.body.score < threshold

	documentation := response.body.documentation
	# FIX: report the configured threshold instead of a hard-coded "5";
	# the rule compares against the condition_value-derived threshold.
	msg := sprintf("%v score for repo %v/%v is %v, which is less than %v out of 10.", [policy_name, input.metadata.owner, input.metadata.repository, response.body.score, threshold])
	sugg := sprintf("%v Check Documentation: %v", [input.metadata.suggestion, documentation])
	error := ""
	}`,

	43: `
	package opsmx

	import future.keywords.in

	rating_map := {
		"A": "5.0",
		"B": "4.0",
		"C": "3.0",
		"D": "2.0",
		"E": "1.0"
	}

	required_rating_name := concat("", ["new_", lower(split(input.conditions[0].condition_name, " ")[1]), "_rating"])
	required_rating_score := rating_map[split(input.conditions[0].condition_name, " ")[3]]

	request_url = sprintf("%s/api/measures/component?metricKeys=%s&component=%s", [input.metadata.ssd_secret.sonarQube_creds.url, required_rating_name, input.metadata.sonarqube_projectKey])

	request = {
		"method": "GET",
		"url":
request_url,
		"headers": {
			"Authorization": sprintf("Bearer %v", [input.metadata.ssd_secret.sonarQube_creds.token]),
		},
	}
	default response = ""
	response = http.send(request)

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	input.metadata.sonarqube_projectKey == ""
	msg := ""
	error := "Project name not provided."
	sugg := "Verify the integration of Sonarqube in SSD is configured properly."
	}

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	response == ""
	msg := ""
	error := "Response not received."
	sugg := "Kindly verify the endpoint provided and the reachability of the endpoint."
	}

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	response.status_code == 500
	msg := ""
	# FIX: "reponding" -> "responding" in the user-facing error text.
	error := "Sonarqube host provided is not responding or is not reachable."
	sugg := "Kindly verify the configuration of sonarqube endpoint and reachability of the endpoint."
	}

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	response.status_code == 404
	# FIX: error_message was never defined anywhere in this policy, so this
	# rule could never evaluate; derive it from the SonarQube error payload.
	error_message := concat(" ", [e.msg | some e in response.body.errors])
	not contains(error_message, "Component key")
	msg := ""
	error := sprintf("%s %v", [response.status, response.body.errors[_].msg])
	sugg := sprintf("Please add the Reliability metrics keys for the project %s.", [input.metadata.sonarqube_projectKey])
	}

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	response.status_code == 404
	msg := ""
	error := sprintf("Error: 404 Not Found. Project not configured for repository %s.", [input.metadata.sonarqube_projectKey])
	sugg := sprintf("Please configure project %s in SonarQube.", [input.metadata.sonarqube_projectKey])
	}

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	response.status_code == 403
	error := sprintf("Error: 403 Forbidden. Provided Token does not have privileges to read status of project %s.", [input.metadata.sonarqube_projectKey])
	msg := ""
	sugg := sprintf("Kindly verify the access token provided is correct and have required privileges to read status of project %s.", [input.metadata.sonarqube_projectKey])
	}

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	not response.status_code in [500, 404, 403, 200, 302]
	# FIX: the format string had two verbs but only one argument.
	error := sprintf("Error: %v", [response.status_code])
	msg := ""
	sugg := sprintf("Kindly rectify the error while fetching %s project status.", [input.metadata.sonarqube_projectKey])
	}

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	response.status_code in [200, 302]
	score = response.body.component.measures[0].period.value
	score == required_rating_score
	msg := sprintf("The SonarQube metric %s stands at %s for project %s, falling short of the expected value.", [required_rating_name, score, input.metadata.sonarqube_projectKey])
	sugg := sprintf("Adhere to code security standards to improve score for project %s.", [input.metadata.sonarqube_projectKey])
	error := ""
	}`,

	44: `
	package opsmx

	import future.keywords.in

	rating_map := {
		"A": "5.0",
		"B": "4.0",
		"C": "3.0",
		"D": "2.0",
		"E": "1.0"
	}

	required_rating_name := concat("", ["new_", lower(split(input.conditions[0].condition_name, " ")[1]), "_rating"])
	required_rating_score := rating_map[split(input.conditions[0].condition_name, " ")[3]]

	request_url = sprintf("%s/api/measures/component?metricKeys=%s&component=%s", [input.metadata.ssd_secret.sonarQube_creds.url, required_rating_name, input.metadata.sonarqube_projectKey])

	request = {
		"method": "GET",
		"url": request_url,
		"headers": {
			"Authorization": sprintf("Bearer %v", [input.metadata.ssd_secret.sonarQube_creds.token]),
		},
	}
	default response = ""
	response = http.send(request)

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
input.metadata.sonarqube_projectKey == ""
	msg := ""
	error := "Project name not provided."
	sugg := "Verify the integration of Sonarqube in SSD is configured properly."
	}

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	response == ""
	msg := ""
	error := "Response not received."
	sugg := "Kindly verify the endpoint provided and the reachability of the endpoint."
	}

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	response.status_code == 500
	msg := ""
	# FIX: "reponding" -> "responding" in the user-facing error text.
	error := "Sonarqube host provided is not responding or is not reachable."
	sugg := "Kindly verify the configuration of sonarqube endpoint and reachability of the endpoint."
	}

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	response.status_code == 404
	# FIX: error_message was never defined anywhere in this policy, so this
	# rule could never evaluate; derive it from the SonarQube error payload.
	error_message := concat(" ", [e.msg | some e in response.body.errors])
	not contains(error_message, "Component key")
	msg := ""
	error := sprintf("%s %v", [response.status, response.body.errors[_].msg])
	sugg := sprintf("Please add the Reliability metrics keys for the project %s.", [input.metadata.sonarqube_projectKey])
	}

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	response.status_code == 404
	msg := ""
	error := sprintf("Error: 404 Not Found. Project not configured for repository %s.", [input.metadata.sonarqube_projectKey])
	sugg := sprintf("Please configure project %s in SonarQube.", [input.metadata.sonarqube_projectKey])
	}

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	response.status_code == 403
	error := sprintf("Error: 403 Forbidden. Provided Token does not have privileges to read status of project %s.", [input.metadata.sonarqube_projectKey])
	msg := ""
	sugg := sprintf("Kindly verify the access token provided is correct and have required privileges to read status of project %s.", [input.metadata.sonarqube_projectKey])
	}

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	not response.status_code in [500, 404, 403, 200, 302]
	# FIX: the format string had two verbs but only one argument.
	error := sprintf("Error: %v", [response.status_code])
	msg := ""
	sugg := sprintf("Kindly rectify the error while fetching %s project status.", [input.metadata.sonarqube_projectKey])
	}

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
	response.status_code in [200, 302]
	score = response.body.component.measures[0].period.value
	score == required_rating_score
	msg := sprintf("The SonarQube metric %s stands at %s for project %s, falling short of the expected value.", [required_rating_name, score, input.metadata.sonarqube_projectKey])
	sugg := sprintf("Adhere to code security standards to improve score for project %s.", [input.metadata.sonarqube_projectKey])
	error := ""
	}`,

	45: `
	package opsmx

	import future.keywords.in

	rating_map := {
		"A": "5.0",
		"B": "4.0",
		"C": "3.0",
		"D": "2.0",
		"E": "1.0"
	}

	required_rating_name := concat("", ["new_", lower(split(input.conditions[0].condition_name, " ")[1]), "_rating"])
	required_rating_score := rating_map[split(input.conditions[0].condition_name, " ")[3]]

	request_url = sprintf("%s/api/measures/component?metricKeys=%s&component=%s", [input.metadata.ssd_secret.sonarQube_creds.url, required_rating_name, input.metadata.sonarqube_projectKey])

	request = {
		"method": "GET",
		"url": request_url,
		"headers": {
			"Authorization": sprintf("Bearer %v", [input.metadata.ssd_secret.sonarQube_creds.token]),
		},
	}
	default response = ""
	response = http.send(request)

	deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
input.metadata.sonarqube_projectKey == "" + msg := "" + error := "Project name not provided." + sugg := "Verify the integration of Sonarqube in SSD is configured properly." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response == "" + msg := "" + error := "Response not received." + sugg := "Kindly verify the endpoint provided and the reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + error := "Sonarqube host provided is not reponding or is not reachable." + sugg := "Kindly verify the configuration of sonarqube endpoint and reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + not contains(error_message, "Component key") + msg := "" + error := sprintf("%s %v", [response.status, response.body.errors[_].msg]) + sugg := sprintf("Please add the Reliability metrics keys for the project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + error := sprintf("Error: 404 Not Found. Project not configured for repository %s.", [input.metadata.sonarqube_projectKey]) + sugg := sprintf("Please configure project %s in SonarQube.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 403 + error := sprintf("Error: 403 Forbidden. 
Provided Token does not have privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + msg := "" + sugg := sprintf("Kindly verify the access token provided is correct and have required privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + not response.status_code in [500, 404, 403, 200, 302] + error := sprintf("Error: %v: %v", [response.status_code]) + msg := "" + sugg := sprintf("Kindly rectify the error while fetching %s project status.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code in [200, 302] + score = response.body.component.measures[0].period.value + score == required_rating_score + msg := sprintf("The SonarQube metric %s stands at %s for project %s, falling short of the expected value.", [required_rating_name, score, input.metadata.sonarqube_projectKey]) + sugg := sprintf("Adhere to code security standards to improve score for project %s.", [input.metadata.sonarqube_projectKey]) + error := "" + }`, + + 46: ` + package opsmx + + missing(obj, field) { + not obj[field] + } + + missing(obj, field) { + obj[field] == "" + } + + canonify_cpu(orig) = new { + is_number(orig) + new := orig * 1000 + } + + canonify_cpu(orig) = new { + not is_number(orig) + endswith(orig, "m") + new := to_number(replace(orig, "m", "")) + } + + canonify_cpu(orig) = new { + not is_number(orig) + not endswith(orig, "m") + regex.find_n("^[0-9]+(\\.[0-9]+)?$", orig,-1) + new := to_number(orig) * 1000 + } + + # 10 ** 21 + mem_multiple("E") = 1000000000000000000000 + + # 10 ** 18 + mem_multiple("P") = 1000000000000000000 + + # 10 ** 15 + mem_multiple("T") = 1000000000000000 + + # 10 ** 12 + mem_multiple("G") = 1000000000000 + + # 10 ** 9 + mem_multiple("M") = 1000000000 + + # 10 ** 6 + mem_multiple("k") = 1000000 + + # 10 ** 3 + mem_multiple("") = 1000 + + # Kubernetes accepts 
millibyte precision when it probably shouldnt. + # https://github.com/kubernetes/kubernetes/issues/28741 + + # 10 ** 0 + mem_multiple("m") = 1 + + # 1000 * 2 ** 10 + mem_multiple("Ki") = 1024000 + + # 1000 * 2 ** 20 + mem_multiple("Mi") = 1048576000 + + # 1000 * 2 ** 30 + mem_multiple("Gi") = 1073741824000 + + # 1000 * 2 ** 40 + mem_multiple("Ti") = 1099511627776000 + + # 1000 * 2 ** 50 + mem_multiple("Pi") = 1125899906842624000 + + # 1000 * 2 ** 60 + mem_multiple("Ei") = 1152921504606846976000 + + get_suffix(mem) = suffix { + not is_string(mem) + suffix := "" + } + + get_suffix(mem) = suffix { + is_string(mem) + count(mem) > 0 + suffix := substring(mem, count(mem) - 1, -1) + mem_multiple(suffix) + } + + get_suffix(mem) = suffix { + is_string(mem) + count(mem) > 1 + suffix := substring(mem, count(mem) - 2, -1) + mem_multiple(suffix) + } + + get_suffix(mem) = suffix { + is_string(mem) + count(mem) > 1 + not mem_multiple(substring(mem, count(mem) - 1, -1)) + not mem_multiple(substring(mem, count(mem) - 2, -1)) + suffix := "" + } + + get_suffix(mem) = suffix { + is_string(mem) + count(mem) == 1 + not mem_multiple(substring(mem, count(mem) - 1, -1)) + suffix := "" + } + + get_suffix(mem) = suffix { + is_string(mem) + count(mem) == 0 + suffix := "" + } + + canonify_mem(orig) = new { + is_number(orig) + new := orig * 1000 + } + + canonify_mem(orig) = new { + not is_number(orig) + suffix := get_suffix(orig) + raw := replace(orig, suffix, "") + regex.find_n("^[0-9]+(\\.[0-9]+)?$", raw, -1) + new := to_number(raw) * mem_multiple(suffix) + } + + # Ephemeral containers not checked as it is not possible to set field. 
+ + deny[{"alertMsg": msg, "suggestion": "Suggest to check the resource limits set and optimize them.", "error": ""}] { + general_violation[{"msg": msg, "field": "containers"}] + } + + deny[{"alertMsg": msg, "suggestion": "Suggest to check the resource limits set and optimize them.", "error": ""}] { + general_violation[{"msg": msg, "field": "initContainers"}] + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + cpu_orig := container.resources.limits.cpu + not canonify_cpu(cpu_orig) + msg := sprintf("container <%v> cpu limit <%v> could not be parsed", [container.name, cpu_orig]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + mem_orig := container.resources.limits.memory + not canonify_mem(mem_orig) + msg := sprintf("container <%v> memory limit <%v> could not be parsed", [container.name, mem_orig]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + not container.resources + msg := sprintf("container <%v> has no resource limits", [container.name]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + not container.resources.limits + msg := sprintf("container <%v> has no resource limits", [container.name]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + missing(container.resources.limits, "cpu") + msg := sprintf("container <%v> has no cpu limit", [container.name]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + missing(container.resources.limits, "memory") + msg := sprintf("container <%v> has no memory limit", [container.name]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + cpu_orig := container.resources.limits.cpu + cpu := canonify_cpu(cpu_orig) 
+ max_cpu_orig := input.parameters.cpu + max_cpu := canonify_cpu(max_cpu_orig) + cpu > max_cpu + msg := sprintf("container <%v> cpu limit <%v> is higher than the maximum allowed of <%v>", [container.name, cpu_orig, max_cpu_orig]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + mem_orig := container.resources.limits.memory + mem := canonify_mem(mem_orig) + max_mem_orig := input.parameters.memory + max_mem := canonify_mem(max_mem_orig) + mem > max_mem + msg := sprintf("container <%v> memory limit <%v> is higher than the maximum allowed of <%v>", [container.name, mem_orig, max_mem_orig]) + }`, + + 47: ` + package opsmx + + missing(obj, field) = true { + not obj[field] + } + + missing(obj, field) = true { + obj[field] == "" + } + + canonify_cpu(orig) = new { + is_number(orig) + new := orig * 1000 + } + + canonify_cpu(orig) = new { + not is_number(orig) + endswith(orig, "m") + new := to_number(replace(orig, "m", "")) + } + + canonify_cpu(orig) = new { + not is_number(orig) + not endswith(orig, "m") + regex.find_n("^[0-9]+(\\.[0-9]+)?$", orig, -1) + new := to_number(orig) * 1000 + } + + # 10 ** 21 + mem_multiple("E") = 1000000000000000000000 { true } + + # 10 ** 18 + mem_multiple("P") = 1000000000000000000 { true } + + # 10 ** 15 + mem_multiple("T") = 1000000000000000 { true } + + # 10 ** 12 + mem_multiple("G") = 1000000000000 { true } + + # 10 ** 9 + mem_multiple("M") = 1000000000 { true } + + # 10 ** 6 + mem_multiple("k") = 1000000 { true } + + # 10 ** 3 + mem_multiple("") = 1000 { true } + + # Kubernetes accepts millibyte precision when it probably shouldnt. 
+ # https://github.com/kubernetes/kubernetes/issues/28741 + # 10 ** 0 + mem_multiple("m") = 1 { true } + + # 1000 * 2 ** 10 + mem_multiple("Ki") = 1024000 { true } + + # 1000 * 2 ** 20 + mem_multiple("Mi") = 1048576000 { true } + + # 1000 * 2 ** 30 + mem_multiple("Gi") = 1073741824000 { true } + + # 1000 * 2 ** 40 + mem_multiple("Ti") = 1099511627776000 { true } + + # 1000 * 2 ** 50 + mem_multiple("Pi") = 1125899906842624000 { true } + + # 1000 * 2 ** 60 + mem_multiple("Ei") = 1152921504606846976000 { true } + + get_suffix(mem) = suffix { + not is_string(mem) + suffix := "" + } + + get_suffix(mem) = suffix { + is_string(mem) + count(mem) > 0 + suffix := substring(mem, count(mem) - 1, -1) + mem_multiple(suffix) + } + + get_suffix(mem) = suffix { + is_string(mem) + count(mem) > 1 + suffix := substring(mem, count(mem) - 2, -1) + mem_multiple(suffix) + } + + get_suffix(mem) = suffix { + is_string(mem) + count(mem) > 1 + not mem_multiple(substring(mem, count(mem) - 1, -1)) + not mem_multiple(substring(mem, count(mem) - 2, -1)) + suffix := "" + } + + get_suffix(mem) = suffix { + is_string(mem) + count(mem) == 1 + not mem_multiple(substring(mem, count(mem) - 1, -1)) + suffix := "" + } + + get_suffix(mem) = suffix { + is_string(mem) + count(mem) == 0 + suffix := "" + } + + canonify_mem(orig) = new { + is_number(orig) + new := orig * 1000 + } + + canonify_mem(orig) = new { + not is_number(orig) + suffix := get_suffix(orig) + raw := replace(orig, suffix, "") + regex.find_n("^[0-9]+(\\.[0-9]+)?$", raw, -1) + new := to_number(raw) * mem_multiple(suffix) + } + + deny[{"alertMsg": msg, "suggestion": "Suggest to set the resource request limits and optimize them.", "error": ""}] { + general_violation[{"msg": msg, "field": "containers"}] + } + + deny[{"alertMsg": msg, "suggestion": "Suggest to check the resource request limits and optimize them.", "error": ""}] { + general_violation[{"msg": msg, "field": "initContainers"}] + } + + general_violation[{"msg": msg, "field": field}] { + 
container := input.request.object.spec[field][_] + cpu_orig := container.resources.requests.cpu + not canonify_cpu(cpu_orig) + msg := sprintf("container <%v> cpu request <%v> could not be parsed", [container.name, cpu_orig]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + mem_orig := container.resources.requests.memory + not canonify_mem(mem_orig) + msg := sprintf("container <%v> memory request <%v> could not be parsed", [container.name, mem_orig]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + not container.resources + msg := sprintf("container <%v> has no resource requests", [container.name]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + not container.resources.requests + msg := sprintf("container <%v> has no resource requests", [container.name]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + missing(container.resources.requests, "cpu") + msg := sprintf("container <%v> has no cpu request", [container.name]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + missing(container.resources.requests, "memory") + msg := sprintf("container <%v> has no memory request", [container.name]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + cpu_orig := container.resources.requests.cpu + cpu := canonify_cpu(cpu_orig) + max_cpu_orig := input.parameters.cpu + max_cpu := canonify_cpu(max_cpu_orig) + cpu > max_cpu + msg := sprintf("container <%v> cpu request <%v> is higher than the maximum allowed of <%v>", [container.name, cpu_orig, max_cpu_orig]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + mem_orig := container.resources.requests.memory + mem 
:= canonify_mem(mem_orig) + max_mem_orig := input.parameters.memory + max_mem := canonify_mem(max_mem_orig) + mem > max_mem + msg := sprintf("container <%v> memory request <%v> is higher than the maximum allowed of <%v>", [container.name, mem_orig, max_mem_orig]) + }`, + + 48: ` + package opsmx + + severity = "high" + default findings_count = 0 + + complete_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=findings_", input.metadata.owner, "_", input.metadata.repository, "_", severity, "_", input.metadata.build_id, "_semgrep.json&scanOperation=semgrepScan"] ) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=findings_", input.metadata.owner, "_", input.metadata.repository, "_", severity, "_", input.metadata.build_id, "_semgrep.json&scanOperation=semgrepScan"] ) + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + findings_count = response.body.totalFindings + findings = response.body.findings + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + findings_count > 0 + some i + title := sprintf("Semgrep Scan: %v ",[findings[i].rule_name]) + msg := sprintf("%v: %v", [findings[i].rule_name, findings[i].rule_message]) + sugg := "Please examine the high-severity findings in the SEMGREP analysis data, available through the View Findings button and proactively review your code for common issues and apply best coding practices during development to prevent such alerts from arising." 
+ error := "" + }`, + + 49: ` + package opsmx + + severity = "medium" + default findings_count = 0 + + complete_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=findings_", input.metadata.owner, "_", input.metadata.repository, "_", severity, "_", input.metadata.build_id, "_semgrep.json&scanOperation=semgrepScan"] ) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=findings_", input.metadata.owner, "_", input.metadata.repository, "_", severity, "_", input.metadata.build_id, "_semgrep.json&scanOperation=semgrepScan"] ) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + findings_count = response.body.totalFindings + findings = response.body.findings + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + findings_count > 0 + some i + title := sprintf("Semgrep Scan: %v ",[findings[i].rule_name]) + msg := sprintf("%v: %v", [findings[i].rule_name, findings[i].rule_message]) + sugg := "Please examine the medium-severity findings in the SEMGREP analysis data, available through the View Findings button and proactively review your code for common issues and apply best coding practices during development to prevent such alerts from arising." 
+ error := "" + }`, + + 50: ` + package opsmx + + missing(obj, field) = true { + not obj[field] + } + + missing(obj, field) = true { + obj[field] == "" + } + + canonify_cpu(orig) = new { + is_number(orig) + new := orig * 1000 + } + + canonify_cpu(orig) = new { + not is_number(orig) + endswith(orig, "m") + new := to_number(replace(orig, "m", "")) + } + + canonify_cpu(orig) = new { + not is_number(orig) + not endswith(orig, "m") + regex.find_n("^[0-9]+$", orig, -1) + new := to_number(orig) * 1000 + } + + canonify_cpu(orig) = new { + not is_number(orig) + not endswith(orig, "m") + regex.find_n("^[0-9]+[.][0-9]+$", orig, -1) + new := to_number(orig) * 1000 + } + + # 10 ** 21 + mem_multiple("E") = 1000000000000000000000 { true } + + # 10 ** 18 + mem_multiple("P") = 1000000000000000000 { true } + + # 10 ** 15 + mem_multiple("T") = 1000000000000000 { true } + + # 10 ** 12 + mem_multiple("G") = 1000000000000 { true } + + # 10 ** 9 + mem_multiple("M") = 1000000000 { true } + + # 10 ** 6 + mem_multiple("k") = 1000000 { true } + + # 10 ** 3 + mem_multiple("") = 1000 { true } + + # Kubernetes accepts millibyte precision when it probably shouldnt. 
+ # https://github.com/kubernetes/kubernetes/issues/28741 + # 10 ** 0 + mem_multiple("m") = 1 { true } + + # 1000 * 2 ** 10 + mem_multiple("Ki") = 1024000 { true } + + # 1000 * 2 ** 20 + mem_multiple("Mi") = 1048576000 { true } + + # 1000 * 2 ** 30 + mem_multiple("Gi") = 1073741824000 { true } + + # 1000 * 2 ** 40 + mem_multiple("Ti") = 1099511627776000 { true } + + # 1000 * 2 ** 50 + mem_multiple("Pi") = 1125899906842624000 { true } + + # 1000 * 2 ** 60 + mem_multiple("Ei") = 1152921504606846976000 { true } + + get_suffix(mem) = suffix { + not is_string(mem) + suffix := "" + } + + get_suffix(mem) = suffix { + is_string(mem) + count(mem) > 0 + suffix := substring(mem, count(mem) - 1, -1) + mem_multiple(suffix) + } + + get_suffix(mem) = suffix { + is_string(mem) + count(mem) > 1 + suffix := substring(mem, count(mem) - 2, -1) + mem_multiple(suffix) + } + + get_suffix(mem) = suffix { + is_string(mem) + count(mem) > 1 + not mem_multiple(substring(mem, count(mem) - 1, -1)) + not mem_multiple(substring(mem, count(mem) - 2, -1)) + suffix := "" + } + + get_suffix(mem) = suffix { + is_string(mem) + count(mem) == 1 + not mem_multiple(substring(mem, count(mem) - 1, -1)) + suffix := "" + } + + get_suffix(mem) = suffix { + is_string(mem) + count(mem) == 0 + suffix := "" + } + + canonify_mem(orig) = new { + is_number(orig) + new := orig * 1000 + } + + canonify_mem(orig) = new { + not is_number(orig) + suffix := get_suffix(orig) + raw := replace(orig, suffix, "") + regex.find_n("^[0-9]+(\\.[0-9]+)?$", raw, -1) + new := to_number(raw) * mem_multiple(suffix) + } + + deny[{"alertMsg": msg, "suggestion": "Suggest to set the resource limits and optimize them.", "error": ""}] { + general_violation[{"msg": msg, "field": "containers"}] + } + + deny[{"alertMsg": msg, "suggestion": "Suggest to set the resource limits and optimize them.", "error": ""}] { + general_violation[{"msg": msg, "field": "initContainers"}] + } + + general_violation[{"msg": msg, "field": field}] { + container := 
input.request.object.spec[field][_] + cpu_orig := container.resources.limits.cpu + not canonify_cpu(cpu_orig) + msg := sprintf("container <%v> cpu limit <%v> could not be parsed", [container.name, cpu_orig]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + mem_orig := container.resources.limits.memory + not canonify_mem(mem_orig) + msg := sprintf("container <%v> memory limit <%v> could not be parsed", [container.name, mem_orig]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + cpu_orig := container.resources.requests.cpu + not canonify_cpu(cpu_orig) + msg := sprintf("container <%v> cpu request <%v> could not be parsed", [container.name, cpu_orig]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + mem_orig := container.resources.requests.memory + not canonify_mem(mem_orig) + msg := sprintf("container <%v> memory request <%v> could not be parsed", [container.name, mem_orig]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + not container.resources + msg := sprintf("container <%v> has no resource limits", [container.name]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + not container.resources.limits + msg := sprintf("container <%v> has no resource limits", [container.name]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + missing(container.resources.limits, "cpu") + msg := sprintf("container <%v> has no cpu limit", [container.name]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + missing(container.resources.limits, "memory") + msg := sprintf("container <%v> has no memory limit", [container.name]) + } + + general_violation[{"msg": msg, 
"field": field}] { + container := input.request.object.spec[field][_] + not container.resources.requests + msg := sprintf("container <%v> has no resource requests", [container.name]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + missing(container.resources.requests, "cpu") + msg := sprintf("container <%v> has no cpu request", [container.name]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + missing(container.resources.requests, "memory") + msg := sprintf("container <%v> has no memory request", [container.name]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + cpu_limits_orig := container.resources.limits.cpu + cpu_limits := canonify_cpu(cpu_limits_orig) + cpu_requests_orig := container.resources.requests.cpu + cpu_requests := canonify_cpu(cpu_requests_orig) + cpu_ratio := object.get(input.parameters, "cpuRatio", input.parameters.ratio) + to_number(cpu_limits) > to_number(cpu_ratio) * to_number(cpu_requests) + msg := sprintf("container <%v> cpu limit <%v> is higher than the maximum allowed ratio of <%v>", [container.name, cpu_limits_orig, cpu_ratio]) + } + + general_violation[{"msg": msg, "field": field}] { + container := input.request.object.spec[field][_] + mem_limits_orig := container.resources.limits.memory + mem_requests_orig := container.resources.requests.memory + mem_limits := canonify_mem(mem_limits_orig) + mem_requests := canonify_mem(mem_requests_orig) + mem_ratio := input.parameters.ratio + to_number(mem_limits) > to_number(mem_ratio) * to_number(mem_requests) + msg := sprintf("container <%v> memory limit <%v> is higher than the maximum allowed ratio of <%v>", [container.name, mem_limits_orig, mem_ratio]) + }`, + + 51: ``, + + 52: ` + package opsmx + + severity = "low" + default findings_count = 0 + + complete_url = 
concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=findings_", input.metadata.owner, "_", input.metadata.repository, "_", severity, "_", input.metadata.build_id, "_semgrep.json&scanOperation=semgrepScan"] ) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=findings_", input.metadata.owner, "_", input.metadata.repository, "_", severity, "_", input.metadata.build_id, "_semgrep.json&scanOperation=semgrepScan"] ) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + + findings_count = response.body.totalFindings + findings = response.body.findings + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + findings_count > 0 + some i + title := sprintf("Semgrep Scan: %v ",[findings[i].rule_name]) + msg := sprintf("%v: %v", [findings[i].rule_name, findings[i].rule_message]) + sugg := "Please examine the low-severity findings in the SEMGREP analysis data, available through the View Findings button and proactively review your code for common issues and apply best coding practices during development to prevent such alerts from arising." 
+ error := "" + }`, + + 53: ` + package opsmx + + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the usage of privilege escalation containers.", "error": ""}] { + not is_update(input.request) + + c := input_containers[_] + input_allow_privilege_escalation(c) + msg := sprintf("Privilege escalation container is not allowed: %v", [c.name]) + } + + input_allow_privilege_escalation(c) { + not has_field(c, "securityContext") + } + input_allow_privilege_escalation(c) { + not c.securityContext.allowPrivilegeEscalation == false + } + input_containers[c] { + c := input.request.object.spec.containers[_] + } + input_containers[c] { + c := input.request.object.spec.initContainers[_] + } + input_containers[c] { + c := input.request.object.spec.ephemeralContainers[_] + } + + has_field(object, field) = true { + object[field] + } + + is_update(review) { + review.operation == "UPDATE" + }`, + + 54: ` + package opsmx + + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the usage of AppArmor Profiles..", "error": ""}] { + metadata := input.request.object.metadata + container := input_containers[_] + not input_apparmor_allowed(container, metadata) + msg := sprintf("AppArmor profile is not allowed, pod: %v, container: %v. 
Allowed profiles: %v", [input.request.object.metadata.name, container.name, input.parameters.allowedProfiles]) + } + + input_apparmor_allowed(container, metadata) { + get_annotation_for(container, metadata) == input.parameters.allowedProfiles[_] + } + + input_containers[c] { + c := input.request.object.spec.containers[_] + } + input_containers[c] { + c := input.request.object.spec.initContainers[_] + } + input_containers[c] { + c := input.request.object.spec.ephemeralContainers[_] + } + + get_annotation_for(container, metadata) = out { + out = metadata.annotations[sprintf("container.apparmor.security.beta.kubernetes.io/%v", [container.name])] + } + get_annotation_for(container, metadata) = out { + not metadata.annotations[sprintf("container.apparmor.security.beta.kubernetes.io/%v", [container.name])] + out = "runtime/default" + }`, + + 55: ` + package opsmx + + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the disallowed capabilities of containers.", "error": ""}] { + # spec.containers.securityContext.capabilities field is immutable. + not is_update(input.request) + + container := input.request.object.spec.containers[_] + has_disallowed_capabilities(container) + msg := sprintf("container <%v> has a disallowed capability. Allowed capabilities are %v", [container.name, get_default(input.parameters, "allowedCapabilities", "NONE")]) + } + + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the disallowed capabilities of containers.", "error": ""}] { + not is_update(input.request) + container := input.request.object.spec.containers[_] + missing_drop_capabilities(container) + msg := sprintf("container <%v> is not dropping all required capabilities. 
Container must drop all of %v or \"ALL\"", [container.name, input.parameters.requiredDropCapabilities]) + } + + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the disallowed capabilities of containers.", "error": ""}] { + not is_update(input.request) + container := input.request.object.spec.initContainers[_] + has_disallowed_capabilities(container) + msg := sprintf("init container <%v> has a disallowed capability. Allowed capabilities are %v", [container.name, get_default(input.parameters, "allowedCapabilities", "NONE")]) + } + + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the disallowed capabilities of containers.", "error": ""}] { + not is_update(input.request) + container := input.request.object.spec.initContainers[_] + missing_drop_capabilities(container) + msg := sprintf("init container <%v> is not dropping all required capabilities. Container must drop all of %v or \"ALL\"", [container.name, input.parameters.requiredDropCapabilities]) + } + + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the disallowed capabilities of containers.", "error": ""}] { + not is_update(input.request) + container := input.request.object.spec.ephemeralContainers[_] + has_disallowed_capabilities(container) + msg := sprintf("ephemeral container <%v> has a disallowed capability. Allowed capabilities are %v", [container.name, get_default(input.parameters, "allowedCapabilities", "NONE")]) + } + + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the disallowed capabilities of containers.", "error": ""}] { + not is_update(input.request) + container := input.request.object.spec.ephemeralContainers[_] + missing_drop_capabilities(container) + msg := sprintf("ephemeral container <%v> is not dropping all required capabilities. 
Container must drop all of %v or \"ALL\"", [container.name, input.parameters.requiredDropCapabilities]) + } + + + has_disallowed_capabilities(container) { + allowed := {c | c := lower(input.parameters.allowedCapabilities[_])} + not allowed["*"] + capabilities := {c | c := lower(container.securityContext.capabilities.add[_])} + + count(capabilities - allowed) > 0 + } + + missing_drop_capabilities(container) { + must_drop := {c | c := lower(input.parameters.requiredDropCapabilities[_])} + all := {"all"} + dropped := {c | c := lower(container.securityContext.capabilities.drop[_])} + + count(must_drop - dropped) > 0 + count(all - dropped) > 0 + } + + get_default(obj, param, _) = out { + out = obj[param] + } + + get_default(obj, param, _default) = out { + not obj[param] + not obj[param] == false + out = _default + } + + is_update(review) { + review.operation == "UPDATE" + }`, + + 56: ` + package opsmx + + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the usage of Flex Volumes.", "error": ""}] { + # spec.volumes field is immutable. + not is_update(input.request) + + volume := input_flexvolumes[_] + not input_flexvolumes_allowed(volume) + msg := sprintf("FlexVolume %v is not allowed, pod: %v. Allowed drivers: %v", [volume, input.request.object.metadata.name, input.parameters.allowedFlexVolumes]) + } + + input_flexvolumes_allowed(volume) { + input.parameters.allowedFlexVolumes[_].driver == volume.flexVolume.driver + } + + input_flexvolumes[v] { + v := input.request.object.spec.volumes[_] + has_field(v, "flexVolume") + } + + # has_field returns whether an object has a field + has_field(object, field) = true { + object[field] + } + + is_update(review) { + review.operation == "UPDATE" + }`, + + 57: ` + package opsmx + + # Block if forbidden + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the usage of restricted sysctls in security context.", "error": ""}] { + # spec.securityContext.sysctls field is immutable. 
+ not is_update(input.request) + + sysctl := input.request.object.spec.securityContext.sysctls[_].name + forbidden_sysctl(sysctl) + msg := sprintf("The sysctl %v is not allowed, pod: %v. Forbidden sysctls: %v", [sysctl, input.request.object.metadata.name, input.parameters.forbiddenSysctls]) + } + + # Block if not explicitly allowed + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the usage of restricted sysctls in security context.", "error": ""}] { + not is_update(input.request) + sysctl := input.request.object.spec.securityContext.sysctls[_].name + not allowed_sysctl(sysctl) + msg := sprintf("The sysctl %v is not explicitly allowed, pod: %v. Allowed sysctls: %v", [sysctl, input.request.object.metadata.name, input.parameters.allowedSysctls]) + } + + # * may be used to forbid all sysctls + forbidden_sysctl(sysctl) { + input.parameters.forbiddenSysctls[_] == "*" + } + + forbidden_sysctl(sysctl) { + input.parameters.forbiddenSysctls[_] == sysctl + } + + forbidden_sysctl(sysctl) { + forbidden := input.parameters.forbiddenSysctls[_] + endswith(forbidden, "*") + startswith(sysctl, trim_suffix(forbidden, "*")) + } + + # * may be used to allow all sysctls + allowed_sysctl(sysctl) { + input.parameters.allowedSysctls[_] == "*" + } + + allowed_sysctl(sysctl) { + input.parameters.allowedSysctls[_] == sysctl + } + + allowed_sysctl(sysctl) { + allowed := input.parameters.allowedSysctls[_] + endswith(allowed, "*") + startswith(sysctl, trim_suffix(allowed, "*")) + } + + is_update(request) { + request.operation == "UPDATE" + }`, + + 58: ` + package opsmx + + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the usage of fsGroup in security context.", "error": ""}] { + # spec.securityContext.fsGroup field is immutable. + not is_update(input.request) + + spec := input.request.object.spec + not input_fsGroup_allowed(spec) + msg := sprintf("The provided pod spec fsGroup is not allowed, pod: %v. 
Allowed fsGroup: %v", [input.request.object.metadata.name, input.parameters]) + } + + input_fsGroup_allowed(_) { + # RunAsAny - No range is required. Allows any fsGroup ID to be specified. + input.parameters.rule == "RunAsAny" + } + input_fsGroup_allowed(spec) { + # MustRunAs - Validates pod spec fsgroup against all ranges + input.parameters.rule == "MustRunAs" + fg := spec.securityContext.fsGroup + count(input.parameters.ranges) > 0 + range := input.parameters.ranges[_] + value_within_range(range, fg) + } + input_fsGroup_allowed(spec) { + # MayRunAs - Validates pod spec fsgroup against all ranges or allow pod spec fsgroup to be left unset + input.parameters.rule == "MayRunAs" + not has_field(spec, "securityContext") + } + input_fsGroup_allowed(spec) { + # MayRunAs - Validates pod spec fsgroup against all ranges or allow pod spec fsgroup to be left unset + input.parameters.rule == "MayRunAs" + not spec.securityContext.fsGroup + } + input_fsGroup_allowed(spec) { + # MayRunAs - Validates pod spec fsgroup against all ranges or allow pod spec fsgroup to be left unset + input.parameters.rule == "MayRunAs" + fg := spec.securityContext.fsGroup + count(input.parameters.ranges) > 0 + range := input.parameters.ranges[_] + value_within_range(range, fg) + } + value_within_range(range, value) { + range.min <= value + range.max >= value + } + # has_field returns whether an object has a field + has_field(object, field) = true { + object[field] + } + + is_update(request) { + request.operation == "UPDATE" + }`, + + 59: ` + package opsmx + + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the usage of HostPath volumes.", "error": ""}] { + not is_update(input.request) + volume := input_hostpath_volumes[_] + allowedPaths := get_allowed_paths(input) + input_hostpath_violation(allowedPaths, volume) + msg := sprintf("HostPath volume %v is not allowed, pod: %v. 
Allowed path: %v", [volume, input.request.object.metadata.name, allowedPaths]) + } + + input_hostpath_violation(allowedPaths, _) { + allowedPaths == [] + } + input_hostpath_violation(allowedPaths, volume) { + not input_hostpath_allowed(allowedPaths, volume) + } + + get_allowed_paths(arg) = out { + not arg.parameters + out = [] + } + get_allowed_paths(arg) = out { + not arg.parameters.allowedHostPaths + out = [] + } + get_allowed_paths(arg) = out { + out = arg.parameters.allowedHostPaths + } + + input_hostpath_allowed(allowedPaths, volume) { + allowedHostPath := allowedPaths[_] + path_matches(allowedHostPath.pathPrefix, volume.hostPath.path) + not allowedHostPath.readOnly == true + } + + input_hostpath_allowed(allowedPaths, volume) { + allowedHostPath := allowedPaths[_] + path_matches(allowedHostPath.pathPrefix, volume.hostPath.path) + allowedHostPath.readOnly + not writeable_input_volume_mounts(volume.name) + } + + writeable_input_volume_mounts(volume_name) { + container := input_containers[_] + mount := container.volumeMounts[_] + mount.name == volume_name + not mount.readOnly + } + + # This allows "/foo", "/foo/", "/foo/bar" etc., but + # disallows "/fool", "/etc/foo" etc. 
+ path_matches(prefix, path) { + a := path_array(prefix) + b := path_array(path) + prefix_matches(a, b) + } + path_array(p) = out { + p != "/" + out := split(trim(p, "/"), "/") + } + # This handles the special case for "/", since + # split(trim("/", "/"), "/") == [""] + path_array("/") = [] + + prefix_matches(a, b) { + count(a) <= count(b) + not any_not_equal_upto(a, b, count(a)) + } + + any_not_equal_upto(a, b, n) { + a[i] != b[i] + i < n + } + + input_hostpath_volumes[v] { + v := input.request.object.spec.volumes[_] + has_field(v, "hostPath") + } + + # has_field returns whether an object has a field + has_field(object, field) = true { + object[field] + } + input_containers[c] { + c := input.request.object.spec.containers[_] + } + + input_containers[c] { + c := input.request.object.spec.initContainers[_] + } + + input_containers[c] { + c := input.request.object.spec.ephemeralContainers[_] + } + + is_update(request) { + request.operation == "UPDATE" + }`, + + 60: ` + package opsmx + + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the sharing of host namespaces.", "error": ""}] { + not is_update(input.review) + + input_share_hostnamespace(input.request.object) + msg := sprintf("Sharing the host namespace is not allowed: %v", [input.request.object.metadata.name]) + } + + input_share_hostnamespace(o) { + o.spec.hostPID + } + input_share_hostnamespace(o) { + o.spec.hostIPC + } + + is_update(review) { + review.operation == "UPDATE" + }`, + + 61: ` + package opsmx + + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the usage of hostNetwork and hostPort.", "error": ""}] { + not is_update(input.request) + + input_share_hostnetwork(input.request.object) + msg := sprintf("The specified hostNetwork and hostPort are not allowed, pod: %v. 
Allowed values: %v", [input.request.object.metadata.name, input.parameters]) + } + + input_share_hostnetwork(o) { + not input.parameters.hostNetwork + o.spec.hostNetwork + } + + input_share_hostnetwork(_) { + hostPort := input_containers[_].ports[_].hostPort + hostPort < input.parameters.min + } + + input_share_hostnetwork(_) { + hostPort := input_containers[_].ports[_].hostPort + hostPort > input.parameters.max + } + + input_containers[c] { + c := input.request.object.spec.containers[_] + } + + input_containers[c] { + c := input.request.object.spec.initContainers[_] + } + + input_containers[c] { + c := input.request.object.spec.ephemeralContainers[_] + } + + is_update(request) { + request.operation == "UPDATE" + }`, + + 62: ` + package opsmx + + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the usage of privileged containers in security context.", "error": ""}] { + not is_update(input.request) + + c := input_containers[_] + c.securityContext.privileged + msg := sprintf("Privileged container is not allowed: %v, securityContext: %v", [c.name, c.securityContext]) + } + + input_containers[c] { + c := input.request.object.spec.containers[_] + } + + input_containers[c] { + c := input.request.object.spec.initContainers[_] + } + + input_containers[c] { + c := input.request.object.spec.ephemeralContainers[_] + } + + is_update(request) { + request.operation == "UPDATE" + }`, + + 63: ` + package opsmx + + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the usage of restricted ProcMount types.", "error": ""}] { + not is_update(input.request) + + c := input_containers[_] + allowedProcMount := get_allowed_proc_mount(input) + not input_proc_mount_type_allowed(allowedProcMount, c) + msg := sprintf("ProcMount type is not allowed, container: %v. 
Allowed procMount types: %v", [c.name, allowedProcMount]) + } + + input_proc_mount_type_allowed(allowedProcMount, c) { + allowedProcMount == "default" + lower(c.securityContext.procMount) == "default" + } + input_proc_mount_type_allowed(allowedProcMount, _) { + allowedProcMount == "unmasked" + } + + input_containers[c] { + c := input.request.object.spec.containers[_] + c.securityContext.procMount + } + input_containers[c] { + c := input.request.object.spec.initContainers[_] + c.securityContext.procMount + } + input_containers[c] { + c := input.request.object.spec.ephemeralContainers[_] + c.securityContext.procMount + } + + get_allowed_proc_mount(arg) = out { + not arg.parameters + out = "default" + } + get_allowed_proc_mount(arg) = out { + not arg.parameters.procMount + out = "default" + } + get_allowed_proc_mount(arg) = out { + arg.parameters.procMount + not valid_proc_mount(arg.parameters.procMount) + out = "default" + } + get_allowed_proc_mount(arg) = out { + valid_proc_mount(arg.parameters.procMount) + out = lower(arg.parameters.procMount) + } + + valid_proc_mount(str) { + lower(str) == "default" + } + valid_proc_mount(str) { + lower(str) == "unmasked" + } + + is_update(request) { + request.operation == "UPDATE" + }`, + + 64: ` + package opsmx + + deny[{"alertMsg": msg, "suggestion": "Suggest to use only read-only root filesystem container.", "error": ""}] { + not is_update(input.request) + + c := input_containers[_] + input_read_only_root_fs(c) + msg := sprintf("only read-only root filesystem container is allowed: %v", [c.name]) + } + + input_read_only_root_fs(c) { + not has_field(c, "securityContext") + } + input_read_only_root_fs(c) { + not c.securityContext.readOnlyRootFilesystem == true + } + + input_containers[c] { + c := input.request.object.spec.containers[_] + } + input_containers[c] { + c := input.request.object.spec.initContainers[_] + } + input_containers[c] { + c := input.request.object.spec.ephemeralContainers[_] + } + + has_field(object, field) = 
true { + object[field] + } + + is_update(request) { + request.operation == "UPDATE" + }`, + + 65: ` + package opsmx + + deny[{"alertMsg": msg, "suggestion": "Suggest to restrict the usage of disallowed volume types.", "error": ""}] { + not is_update(input.request) + + volume_fields := {x | input.request.object.spec.volumes[_][x]; x != "name"} + field := volume_fields[_] + not input_volume_type_allowed(field) + msg := sprintf("The volume type %v is not allowed, pod: %v. Allowed volume types: %v", [field, input.request.object.metadata.name, input.parameters.volumes]) + } + + # * may be used to allow all volume types + input_volume_type_allowed(_) { + input.parameters.volumes[_] == "*" + } + + input_volume_type_allowed(field) { + field == input.parameters.volumes[_] + } + + is_update(request) { + request.operation == "UPDATE" + }`, + + 66: ` + package opsmx + import future.keywords.in + + request_url_p1 = concat("/",[input.metadata.ssd_secret.sonarQube_creds.url,"api/qualitygates/project_status?projectKey"]) + request_url = concat("=", [request_url_p1, input.metadata.sonarqube_projectKey]) + + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [input.metadata.ssd_secret.sonarQube_creds.token]), + }, + } + + default response = "" + response = http.send(request) + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + input.metadata.sonarqube_projectKey == "" + msg := "" + error := "Project name not provided." + sugg := "Verify the integration of Sonarqube in SSD is configured properly." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response == "" + msg := "" + error := "Response not received." + sugg := "Kindly verify the endpoint provided and the reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + error := "Sonarqube host provided is not reponding or is not reachable." 
+ sugg := "Kindly verify the configuration of sonarqube endpoint and reachability of the endpoint."
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ response.status_code == 404
+ msg := ""
+ error := sprintf("Error: 404 Not Found. Project not configured for repository %s.", [input.metadata.sonarqube_projectKey])
+ sugg := sprintf("Please configure project %s in SonarQube.", [input.metadata.sonarqube_projectKey])
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ response.status_code == 403
+ error := sprintf("Error: 403 Forbidden. Provided Token does not have privileges to read status of project %s.", [input.metadata.sonarqube_projectKey])
+ msg := ""
+ sugg := sprintf("Kindly verify the access token provided is correct and have required privileges to read status of project %s.", [input.metadata.sonarqube_projectKey])
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ not response.status_code in [500, 404, 403, 200, 302]
+ error := sprintf("Error: %v", [response.status_code])
+ msg := ""
+ sugg := sprintf("Kindly rectify the error while fetching %s project status.", [input.metadata.sonarqube_projectKey])
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ response.body.projectStatus.status == "ERROR"
+ msg := sprintf("SonarQube Quality Gate Status Check has failed for project %s. Prioritize and address the identified issues promptly to meet the defined quality standards and ensure software reliability.", [input.metadata.sonarqube_projectKey])
+ error := ""
+ sugg := "Prioritize and address the identified issues promptly to meet the defined quality standards and ensure software reliability."
+ }`, + + 67: ` + package opsmx + + import future.keywords.in + + rating_map := { + "A": "5.0", + "B": "4.0", + "C": "3.0", + "D": "2.0", + "E": "1.0" + } + + required_rating_name := concat("", ["new_", lower(split(input.conditions[0].condition_name, " ")[1]), "_rating"]) + required_rating_score := rating_map[split(input.conditions[0].condition_name, " ")[3]] + + request_url = sprintf("%s/api/measures/component?metricKeys=%s&component=%s", [input.metadata.ssd_secret.sonarQube_creds.url, required_rating_name, input.metadata.sonarqube_projectKey]) + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [input.metadata.ssd_secret.sonarQube_creds.token]), + }, + } + default response = "" + response = http.send(request) + error_message = response.body.errors[_].msg + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + input.metadata.sonarqube_projectKey == "" + msg := "" + error := "Project name not provided." + sugg := "Verify the integration of Sonarqube in SSD is configured properly." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response == "" + msg := "" + error := "Response not received." + sugg := "Kindly verify the endpoint provided and the reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + error := "Sonarqube host provided is not reponding or is not reachable." + sugg := "Kindly verify the configuration of sonarqube endpoint and reachability of the endpoint." 
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ response.status_code == 404
+ not contains(error_message, "Component key")
+ msg := ""
+ error := sprintf("%s %v", [response.status, response.body.errors[_].msg])
+ sugg := sprintf("Please add the Maintainability metrics keys for the project %s.", [input.metadata.sonarqube_projectKey])
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ response.status_code == 404
+ contains(error_message, "Component key")
+ msg := ""
+ error := sprintf("Sonar Qube Project is not present %s", [input.metadata.sonarqube_projectKey])
+ sugg := "Project is incorrect, Please provide the appropriate project key"
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ response.status_code == 403
+ error := sprintf("Error: 403 Forbidden. Provided Token does not have privileges to read status of project %s.", [input.metadata.sonarqube_projectKey])
+ msg := ""
+ sugg := sprintf("Kindly verify the access token provided is correct and have required privileges to read status of project %s.", [input.metadata.sonarqube_projectKey])
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ not response.status_code in [500, 404, 403, 200, 302]
+ error := sprintf("Error: %v", [response.status_code])
+ msg := ""
+ sugg := sprintf("Kindly rectify the error while fetching %s project status.", [input.metadata.sonarqube_projectKey])
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ response.status_code in [200, 302]
+ score = response.body.component.measures[0].period.value
+ score == required_rating_score
+ msg := sprintf("The SonarQube metric %s stands at %s for project %s, falling short of the expected value.", [required_rating_name, score, input.metadata.sonarqube_projectKey])
+ sugg := sprintf("Adhere to code security standards to improve score for project %s.", [input.metadata.sonarqube_projectKey])
+ error := ""
+ }`,
+
+ 68: `
+ package opsmx
+
+ import 
future.keywords.in + + rating_map := { + "A": "5.0", + "B": "4.0", + "C": "3.0", + "D": "2.0", + "E": "1.0" + } + + required_rating_name := concat("", ["new_", lower(split(input.conditions[0].condition_name, " ")[1]), "_rating"]) + required_rating_score := rating_map[split(input.conditions[0].condition_name, " ")[3]] + + request_url = sprintf("%s/api/measures/component?metricKeys=%s&component=%s", [input.metadata.ssd_secret.sonarQube_creds.url, required_rating_name, input.metadata.sonarqube_projectKey]) + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [input.metadata.ssd_secret.sonarQube_creds.token]), + }, + } + default response = "" + response = http.send(request) + error_message = response.body.errors[_].msg + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + input.metadata.sonarqube_projectKey == "" + msg := "" + error := "Project name not provided." + sugg := "Verify the integration of Sonarqube in SSD is configured properly." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response == "" + msg := "" + error := "Response not received." + sugg := "Kindly verify the endpoint provided and the reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + error := "Sonarqube host provided is not reponding or is not reachable." + sugg := "Kindly verify the configuration of sonarqube endpoint and reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 403 + error := sprintf("Error: 403 Forbidden. 
Provided Token does not have privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + msg := "" + sugg := sprintf("Kindly verify the access token provided is correct and have required privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + not contains(error_message, "Component key") + msg := "" + error := sprintf("%s %v", [response.status, response.body.errors[_].msg]) + sugg := sprintf("Please add the Maintanability metrics keys for the project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + contains(error_message, "Component key") + msg := "" + error := sprintf("Sonar Qube Project is not present %s", [input.metadata.sonarqube_projectKey]) + sugg := "Project is incorrect, Please provide the appropriate project key" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + not response.status_code in [500, 404, 403, 200, 302] + error := sprintf("Error: %v: %v", [response.status_code]) + msg := "" + sugg := sprintf("Kindly rectify the error while fetching %s project status.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code in [200, 302] + score = response.body.component.measures[0].period.value + score == required_rating_score + msg := sprintf("The SonarQube metric %s stands at %s for project %s, falling short of the expected value.", [required_rating_name, score, input.metadata.sonarqube_projectKey]) + sugg := sprintf("Adhere to code security standards to improve score for project %s.", [input.metadata.sonarqube_projectKey]) + error := "" + }`, + + 69: ` + package opsmx + + import future.keywords.in + + rating_map := { + "A": "5.0", + "B": "4.0", + "C": "3.0", + "D": "2.0", + "E": "1.0" + } + + required_rating_name := 
concat("", ["new_", lower(split(input.conditions[0].condition_name, " ")[1]), "_rating"]) + required_rating_score := rating_map[split(input.conditions[0].condition_name, " ")[3]] + + request_url = sprintf("%s/api/measures/component?metricKeys=%s&component=%s", [input.metadata.ssd_secret.sonarQube_creds.url, required_rating_name, input.metadata.sonarqube_projectKey]) + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [input.metadata.ssd_secret.sonarQube_creds.token]), + }, + } + default response = "" + response = http.send(request) + error_message = response.body.errors[_].msg + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + input.metadata.sonarqube_projectKey == "" + msg := "" + error := "Project name not provided." + sugg := "Verify the integration of Sonarqube in SSD is configured properly." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response == "" + msg := "" + error := "Response not received." + sugg := "Kindly verify the endpoint provided and the reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + error := "Sonarqube host provided is not reponding or is not reachable." + sugg := "Kindly verify the configuration of sonarqube endpoint and reachability of the endpoint." 
+ } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + not contains(error_message, "Component key") + msg := "" + error := sprintf("%s %v", [response.status, response.body.errors[_].msg]) + sugg := sprintf("Please add the Maintanability metrics keys for the project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + contains(error_message, "Component key") + msg := "" + error := sprintf("Sonar Qube Project is not present %s", [input.metadata.sonarqube_projectKey]) + sugg := "Project is incorrect, Please provide the appropriate project key" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 403 + error := sprintf("Error: 403 Forbidden. Provided Token does not have privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + msg := "" + sugg := sprintf("Kindly verify the access token provided is correct and have required privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + not response.status_code in [500, 404, 403, 200, 302] + error := sprintf("Error: %v: %v", [response.status_code]) + msg := "" + sugg := sprintf("Kindly rectify the error while fetching %s project status.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code in [200, 302] + score = response.body.component.measures[0].period.value + score == required_rating_score + msg := sprintf("The SonarQube metric %s stands at %s for project %s, falling short of the expected value.", [required_rating_name, score, input.metadata.sonarqube_projectKey]) + sugg := sprintf("Adhere to code security standards to improve score for project %s.", [input.metadata.sonarqube_projectKey]) + error := "" + }`, + + 70: ` + package opsmx + + import 
future.keywords.in + + rating_map := { + "A": "5.0", + "B": "4.0", + "C": "3.0", + "D": "2.0", + "E": "1.0" + } + + required_rating_name := concat("", ["new_", lower(split(input.conditions[0].condition_name, " ")[1]), "_rating"]) + required_rating_score := rating_map[split(input.conditions[0].condition_name, " ")[3]] + + request_url = sprintf("%s/api/measures/component?metricKeys=%s&component=%s", [input.metadata.ssd_secret.sonarQube_creds.url, required_rating_name, input.metadata.sonarqube_projectKey]) + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [input.metadata.ssd_secret.sonarQube_creds.token]), + }, + } + default response = "" + response = http.send(request) + error_message = response.body.errors[_].msg + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + input.metadata.sonarqube_projectKey == "" + msg := "" + error := "Project name not provided." + sugg := "Verify the integration of Sonarqube in SSD is configured properly." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response == "" + msg := "" + error := "Response not received." + sugg := "Kindly verify the endpoint provided and the reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + error := "Sonarqube host provided is not reponding or is not reachable." + sugg := "Kindly verify the configuration of sonarqube endpoint and reachability of the endpoint." 
+ } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + not contains(error_message, "Component key") + msg := "" + error := sprintf("%s %v", [response.status, response.body.errors[_].msg]) + sugg := sprintf("Please add the Maintanability metrics keys for the project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + contains(error_message, "Component key") + msg := "" + error := sprintf("Sonar Qube Project is not present %s", [input.metadata.sonarqube_projectKey]) + sugg := "Project is incorrect, Please provide the appropriate project key" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 403 + error := sprintf("Error: 403 Forbidden. Provided Token does not have privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + msg := "" + sugg := sprintf("Kindly verify the access token provided is correct and have required privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + not response.status_code in [500, 404, 403, 200, 302] + error := sprintf("Error: %v: %v", [response.status_code]) + msg := "" + sugg := sprintf("Kindly rectify the error while fetching %s project status.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code in [200, 302] + score = response.body.component.measures[0].period.value + score == required_rating_score + msg := sprintf("The SonarQube metric %s stands at %s for project %s, falling short of the expected value.", [required_rating_name, score, input.metadata.sonarqube_projectKey]) + sugg := sprintf("Adhere to code security standards to improve score for project %s.", [input.metadata.sonarqube_projectKey]) + error := "" + }`, + + 71: ` + package opsmx + + import 
future.keywords.in + + rating_map := { + "A": "5.0", + "B": "4.0", + "C": "3.0", + "D": "2.0", + "E": "1.0" + } + + required_rating_name := concat("", ["new_", lower(split(input.conditions[0].condition_name, " ")[1]), "_rating"]) + required_rating_score := rating_map[split(input.conditions[0].condition_name, " ")[3]] + + request_url = sprintf("%s/api/measures/component?metricKeys=%s&component=%s", [input.metadata.ssd_secret.sonarQube_creds.url, required_rating_name, input.metadata.sonarqube_projectKey]) + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [input.metadata.ssd_secret.sonarQube_creds.token]), + }, + } + default response = "" + response = http.send(request) + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + input.metadata.sonarqube_projectKey == "" + msg := "" + error := "Project name not provided." + sugg := "Verify the integration of Sonarqube in SSD is configured properly." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response == "" + msg := "" + error := "Response not received." + sugg := "Kindly verify the endpoint provided and the reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + error := "Sonarqube host provided is not reponding or is not reachable." + sugg := "Kindly verify the configuration of sonarqube endpoint and reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + not contains(error_message, "Component key") + msg := "" + error := sprintf("%s %v", [response.status, response.body.errors[_].msg]) + sugg := sprintf("Please add the Security metrics keys for the project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + error := sprintf("Error: 404 Not Found. 
Project not configured for repository %s.", [input.metadata.sonarqube_projectKey]) + sugg := sprintf("Please configure project %s in SonarQube.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 403 + error := sprintf("Error: 403 Forbidden. Provided Token does not have privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + msg := "" + sugg := sprintf("Kindly verify the access token provided is correct and have required privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + not response.status_code in [500, 404, 403, 200, 302] + error := sprintf("Error: %v: %v", [response.status_code]) + msg := "" + sugg := sprintf("Kindly rectify the error while fetching %s project status.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code in [200, 302] + score = response.body.component.measures[0].period.value + score == required_rating_score + msg := sprintf("The SonarQube metric %s stands at %s for project %s, falling short of the expected value.", [required_rating_name, score, input.metadata.sonarqube_projectKey]) + sugg := sprintf("Adhere to code security standards to improve score for project %s.", [input.metadata.sonarqube_projectKey]) + error := "" + }`, + + 72: ` + package opsmx + + import future.keywords.in + + rating_map := { + "A": "5.0", + "B": "4.0", + "C": "3.0", + "D": "2.0", + "E": "1.0" + } + + required_rating_name := concat("", ["new_", lower(split(input.conditions[0].condition_name, " ")[1]), "_rating"]) + required_rating_score := rating_map[split(input.conditions[0].condition_name, " ")[3]] + + request_url = sprintf("%s/api/measures/component?metricKeys=%s&component=%s", [input.metadata.ssd_secret.sonarQube_creds.url, required_rating_name, 
input.metadata.sonarqube_projectKey]) + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [input.metadata.ssd_secret.sonarQube_creds.token]), + }, + } + default response = "" + response = http.send(request) + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + input.metadata.sonarqube_projectKey == "" + msg := "" + error := "Project name not provided." + sugg := "Verify the integration of Sonarqube in SSD is configured properly." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response == "" + msg := "" + error := "Response not received." + sugg := "Kindly verify the endpoint provided and the reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + error := "Sonarqube host provided is not reponding or is not reachable." + sugg := "Kindly verify the configuration of sonarqube endpoint and reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + not contains(error_message, "Component key") + msg := "" + error := sprintf("%s %v", [response.status, response.body.errors[_].msg]) + sugg := sprintf("Please add the Security metrics keys for the project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + error := sprintf("Error: 404 Not Found. Project not configured for repository %s.", [input.metadata.sonarqube_projectKey]) + sugg := sprintf("Please configure project %s in SonarQube.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 403 + error := sprintf("Error: 403 Forbidden. 
Provided Token does not have privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + msg := "" + sugg := sprintf("Kindly verify the access token provided is correct and have required privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + not response.status_code in [500, 404, 403, 200, 302] + error := sprintf("Error: %v: %v", [response.status_code]) + msg := "" + sugg := sprintf("Kindly rectify the error while fetching %s project status.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code in [200, 302] + score = response.body.component.measures[0].period.value + score == required_rating_score + msg := sprintf("The SonarQube metric %s stands at %s for project %s, falling short of the expected value.", [required_rating_name, score, input.metadata.sonarqube_projectKey]) + sugg := sprintf("Adhere to code security standards to improve score for project %s.", [input.metadata.sonarqube_projectKey]) + error := "" + }`, + + 73: ` + package opsmx + + import future.keywords.in + + rating_map := { + "A": "5.0", + "B": "4.0", + "C": "3.0", + "D": "2.0", + "E": "1.0" + } + + required_rating_name := concat("", ["new_", lower(split(input.conditions[0].condition_name, " ")[1]), "_rating"]) + required_rating_score := rating_map[split(input.conditions[0].condition_name, " ")[3]] + + request_url = sprintf("%s/api/measures/component?metricKeys=%s&component=%s", [input.metadata.ssd_secret.sonarQube_creds.url, required_rating_name, input.metadata.sonarqube_projectKey]) + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [input.metadata.ssd_secret.sonarQube_creds.token]), + }, + } + default response = "" + response = http.send(request) + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + 
input.metadata.sonarqube_projectKey == "" + msg := "" + error := "Project name not provided." + sugg := "Verify the integration of Sonarqube in SSD is configured properly." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response == "" + msg := "" + error := "Response not received." + sugg := "Kindly verify the endpoint provided and the reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + error := "Sonarqube host provided is not reponding or is not reachable." + sugg := "Kindly verify the configuration of sonarqube endpoint and reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + not contains(error_message, "Component key") + msg := "" + error := sprintf("%s %v", [response.status, response.body.errors[_].msg]) + sugg := sprintf("Please add the Security metrics keys for the project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + error := sprintf("Error: 404 Not Found. Project not configured for repository %s.", [input.metadata.sonarqube_projectKey]) + sugg := sprintf("Please configure project %s in SonarQube.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 403 + error := sprintf("Error: 403 Forbidden. 
Provided Token does not have privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + msg := "" + sugg := sprintf("Kindly verify the access token provided is correct and have required privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + not response.status_code in [500, 404, 403, 200, 302] + error := sprintf("Error: %v: %v", [response.status_code]) + msg := "" + sugg := sprintf("Kindly rectify the error while fetching %s project status.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code in [200, 302] + score = response.body.component.measures[0].period.value + score == required_rating_score + msg := sprintf("The SonarQube metric %s stands at %s for project %s, falling short of the expected value.", [required_rating_name, score, input.metadata.sonarqube_projectKey]) + sugg := sprintf("Adhere to code security standards to improve score for project %s.", [input.metadata.sonarqube_projectKey]) + error := "" + }`, + + 74: ` + package opsmx + + import future.keywords.in + + rating_map := { + "A": "5.0", + "B": "4.0", + "C": "3.0", + "D": "2.0", + "E": "1.0" + } + + required_rating_name := concat("", ["new_", lower(split(input.conditions[0].condition_name, " ")[1]), "_rating"]) + required_rating_score := rating_map[split(input.conditions[0].condition_name, " ")[3]] + + request_url = sprintf("%s/api/measures/component?metricKeys=%s&component=%s", [input.metadata.ssd_secret.sonarQube_creds.url, required_rating_name, input.metadata.sonarqube_projectKey]) + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [input.metadata.ssd_secret.sonarQube_creds.token]), + }, + } + default response = "" + response = http.send(request) + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + 
input.metadata.sonarqube_projectKey == "" + msg := "" + error := "Project name not provided." + sugg := "Verify the integration of Sonarqube in SSD is configured properly." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response == "" + msg := "" + error := "Response not received." + sugg := "Kindly verify the endpoint provided and the reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + error := "Sonarqube host provided is not reponding or is not reachable." + sugg := "Kindly verify the configuration of sonarqube endpoint and reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + not contains(error_message, "Component key") + msg := "" + error := sprintf("%s %v", [response.status, response.body.errors[_].msg]) + sugg := sprintf("Please add the Security metrics keys for the project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + error := sprintf("Error: 404 Not Found. Project not configured for repository %s.", [input.metadata.sonarqube_projectKey]) + sugg := sprintf("Please configure project %s in SonarQube.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 403 + error := sprintf("Error: 403 Forbidden. 
Provided Token does not have privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + msg := "" + sugg := sprintf("Kindly verify the access token provided is correct and have required privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + not response.status_code in [500, 404, 403, 200, 302] + error := sprintf("Error: %v: %v", [response.status_code]) + msg := "" + sugg := sprintf("Kindly rectify the error while fetching %s project status.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code in [200, 302] + score = response.body.component.measures[0].period.value + score == required_rating_score + msg := sprintf("The SonarQube metric %s stands at %s for project %s, falling short of the expected value.", [required_rating_name, score, input.metadata.sonarqube_projectKey]) + sugg := sprintf("Adhere to code security standards to improve score for project %s.", [input.metadata.sonarqube_projectKey]) + error := "" + }`, + + 75: ` + package opsmx + + import future.keywords.in + + rating_map := { + "A": "5.0", + "B": "4.0", + "C": "3.0", + "D": "2.0", + "E": "1.0" + } + + required_rating_name := concat("", ["new_", lower(split(input.conditions[0].condition_name, " ")[1]), "_rating"]) + required_rating_score := rating_map[split(input.conditions[0].condition_name, " ")[3]] + + request_url = sprintf("%s/api/measures/component?metricKeys=%s&component=%s", [input.metadata.ssd_secret.sonarQube_creds.url, required_rating_name, input.metadata.sonarqube_projectKey]) + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [input.metadata.ssd_secret.sonarQube_creds.token]), + }, + } + default response = "" + response = http.send(request) + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + 
input.metadata.sonarqube_projectKey == "" + msg := "" + error := "Project name not provided." + sugg := "Verify the integration of Sonarqube in SSD is configured properly." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response == "" + msg := "" + error := "Response not received." + sugg := "Kindly verify the endpoint provided and the reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "" + error := "Sonarqube host provided is not reponding or is not reachable." + sugg := "Kindly verify the configuration of sonarqube endpoint and reachability of the endpoint." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + not contains(error_message, "Component key") + msg := "" + error := sprintf("%s %v", [response.status, response.body.errors[_].msg]) + sugg := sprintf("Please add the Reliability metrics keys for the project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + error := sprintf("Error: 404 Not Found. Project not configured for repository %s.", [input.metadata.sonarqube_projectKey]) + sugg := sprintf("Please configure project %s in SonarQube.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 403 + error := sprintf("Error: 403 Forbidden. 
Provided Token does not have privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + msg := "" + sugg := sprintf("Kindly verify the access token provided is correct and have required privileges to read status of project %s.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + not response.status_code in [500, 404, 403, 200, 302] + error := sprintf("Error: %v: %v", [response.status_code]) + msg := "" + sugg := sprintf("Kindly rectify the error while fetching %s project status.", [input.metadata.sonarqube_projectKey]) + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code in [200, 302] + score = response.body.component.measures[0].period.value + score == required_rating_score + msg := sprintf("The SonarQube metric %s stands at %s for project %s, falling short of the expected value.", [required_rating_name, score, input.metadata.sonarqube_projectKey]) + sugg := sprintf("Adhere to code security standards to improve score for project %s.", [input.metadata.sonarqube_projectKey]) + error := "" + }`, + + 76: ` + package opsmx + severities = ["HIGH"] + vuln_id = input.conditions[0].condition_value + vuln_severity = {input.conditions[i].condition_value | input.conditions[i].condition_name = "severity"} + deny[msg]{ + some i + inputSeverity = severities[i] + some j + vuln_severity[j] == inputSeverity + msg:= sprintf("%v Criticality Vulnerability : %v found in component: %v", [inputSeverity, vuln_id, input.metadata.package_name]) + } + `, + + 77: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on 
cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",\n",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 78: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",\n",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 79: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",\n",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 80: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, 
concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 81: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 82: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 83: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 84: ` + package opsmx + import 
future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 85: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 86: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 87: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = 
input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 88: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 89: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 90: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = 
input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 91: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 92: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 93: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + 
msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 94: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 95: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 96: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, 
input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 97: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 98: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 99: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 100: 
` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 101: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 102: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 103: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = 
input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 104: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 105: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 106: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = 
input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 107: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 108: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 109: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 
0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 110: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 111: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 112: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, 
policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 113: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 114: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 115: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + 
}`, + + 116: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 117: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 118: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 119: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { 
+ policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 120: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 121: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 122: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = 
input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 123: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 124: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 125: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 
0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 126: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 127: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 128: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, 
policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 129: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 130: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 131: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + 
}`, + + 132: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 133: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 134: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 135: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { 
+ policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 136: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 137: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 138: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = 
input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 139: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 140: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 141: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 
0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 142: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 143: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 144: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, 
policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 145: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 146: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 147: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + 
}`, + + 148: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 149: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 150: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 151: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { 
+ policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 152: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 153: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 154: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = 
input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 155: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 156: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 157: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 
0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 158: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 159: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 160: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, 
policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 161: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 162: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 163: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + 
}`, + + 164: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 165: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 166: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 167: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { 
+ policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 168: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 169: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 170: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = 
input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 171: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 172: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 173: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 
0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 174: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 175: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 176: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, 
policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 177: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 178: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 179: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + 
}`, + + 180: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 181: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 182: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 183: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { 
+ policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 184: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 185: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 186: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = 
input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 187: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 188: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 189: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 
0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 190: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 191: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 192: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, 
policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 193: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 194: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 195: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + 
}`, + + 196: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 197: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":error}] { + policy = input.conditions[0].condition_name + + input.metadata.results[i].control_title == policy + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v on cluster %v due to following resources: %v", [input.metadata.scan_type, policy, input.metadata.account_name, concat(",",failed_resources)]) + error := "" + suggestion := input.metadata.suggestion + }`, + + 198: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 199: ` + package opsmx + import 
future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 200: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 201: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, 
concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 202: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 203: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 204: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", 
[input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 205: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 206: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 207: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 
0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 208: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 209: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 210: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] 
+ failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 211: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 212: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 213: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] 
{ + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 214: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 215: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 216: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + 
control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 217: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 218: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 
219: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 220: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 221: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, 
input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 222: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 223: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 224: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following 
resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 225: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 226: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 227: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + 
counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 228: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 229: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 230: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = 
input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 231: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 232: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 233: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, 
"suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 234: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 235: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 236: ` + package opsmx + import future.keywords.in + + policy = 
input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 237: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 238: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := 
input.metadata.suggestion + }`, + + 239: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 240: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 241: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, 
control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 242: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 243: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 244: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on 
cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 245: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 246: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 247: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter 
= count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 248: ` + package opsmx + import future.keywords.in + + policy = input.conditions[0].condition_name + control_id = split(policy, " -")[0] + + deny[{"alertMsg":msg, "suggestion":suggestion, "error":""}] { + input.metadata.results[i].control_id == control_id + control_struct = input.metadata.results[i] + failed_resources = control_struct.failed_resources + counter = count(failed_resources) + counter > 0 + msg := sprintf("%v scan failed for control %v:%v on cluster %v due to following resources: %v", [input.metadata.scan_type, control_struct.control_id, control_struct.control_title, input.metadata.account_name, concat(",",failed_resources)]) + suggestion := input.metadata.suggestion + }`, + + 249: ` + package opsmx + + condition_value := input.conditions[0].condition_value + min_threshold_str := split(condition_value, "-")[0] + max_threshold_str := split(condition_value, "-")[1] + min_threshold := to_number(min_threshold_str) + max_threshold := to_number(max_threshold_str) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": ""}] { + score := input.metadata.compliance_score + score > min_threshold + score <= max_threshold + msg := sprintf("%v Scan failed for cluster %v as Compliance Score was found to be %v which is below threshold %v.", [input.metadata.scan_type, input.metadata.account_name, score, max_threshold]) + sugg := input.metadata.suggestion + }`, + + 250: ` + package opsmx + + condition_value := input.conditions[0].condition_value + min_threshold_str := split(condition_value, "-")[0] + max_threshold_str := split(condition_value, "-")[1] + min_threshold := to_number(min_threshold_str) + max_threshold := 
to_number(max_threshold_str) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": ""}] { + score := input.metadata.compliance_score + score > min_threshold + score <= max_threshold + msg := sprintf("%v Scan failed for cluster %v as Compliance Score was found to be %v which is below threshold %v.", [input.metadata.scan_type, input.metadata.account_name, score, max_threshold]) + sugg := input.metadata.suggestion + }`, + + 251: ` + package opsmx + + condition_value := input.conditions[0].condition_value + min_threshold_str := split(condition_value, "-")[0] + max_threshold_str := split(condition_value, "-")[1] + min_threshold := to_number(min_threshold_str) + max_threshold := to_number(max_threshold_str) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": ""}] { + score := input.metadata.compliance_score + score > min_threshold + score <= max_threshold + msg := sprintf("%v Scan failed for cluster %v as Compliance Score was found to be %v which is below threshold %v.", [input.metadata.scan_type, input.metadata.account_name, score, max_threshold]) + sugg := input.metadata.suggestion + }`, + + 252: ` + package opsmx + + condition_value := input.conditions[0].condition_value + min_threshold_str := split(condition_value, "-")[0] + max_threshold_str := split(condition_value, "-")[1] + min_threshold := to_number(min_threshold_str) + max_threshold := to_number(max_threshold_str) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": ""}] { + score := input.metadata.compliance_score + score > min_threshold + score <= max_threshold + msg := sprintf("%v Scan failed for cluster %v as Compliance Score was found to be %v which is below threshold %v.", [input.metadata.scan_type, input.metadata.account_name, score, max_threshold]) + sugg := input.metadata.suggestion + }`, + + 253: ` + package opsmx + + condition_value := input.conditions[0].condition_value + min_threshold_str := split(condition_value, "-")[0] + max_threshold_str := split(condition_value, "-")[1] + 
min_threshold := to_number(min_threshold_str) + max_threshold := to_number(max_threshold_str) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": ""}] { + score := input.metadata.compliance_score + score > min_threshold + score <= max_threshold + msg := sprintf("%v Scan failed for cluster %v as Compliance Score was found to be %v which is below threshold %v.", [input.metadata.scan_type, input.metadata.account_name, score, max_threshold]) + sugg := input.metadata.suggestion + }`, + + 254: ` + package opsmx + + condition_value := input.conditions[0].condition_value + min_threshold_str := split(condition_value, "-")[0] + max_threshold_str := split(condition_value, "-")[1] + min_threshold := to_number(min_threshold_str) + max_threshold := to_number(max_threshold_str) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": ""}] { + score := input.metadata.compliance_score + score > min_threshold + score <= max_threshold + msg := sprintf("%v Scan failed for cluster %v as Compliance Score was found to be %v which is below threshold %v.", [input.metadata.scan_type, input.metadata.account_name, score, max_threshold]) + sugg := input.metadata.suggestion + }`, + + 255: ` + package opsmx + + condition_value := input.conditions[0].condition_value + min_threshold_str := split(condition_value, "-")[0] + max_threshold_str := split(condition_value, "-")[1] + min_threshold := to_number(min_threshold_str) + max_threshold := to_number(max_threshold_str) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": ""}] { + score := input.metadata.compliance_score + score > min_threshold + score <= max_threshold + msg := sprintf("%v Scan failed for cluster %v as Compliance Score was found to be %v which is below threshold %v.", [input.metadata.scan_type, input.metadata.account_name, score, max_threshold]) + sugg := input.metadata.suggestion + }`, + + 256: ` + package opsmx + + condition_value := input.conditions[0].condition_value + min_threshold_str := split(condition_value, "-")[0] + 
max_threshold_str := split(condition_value, "-")[1] + min_threshold := to_number(min_threshold_str) + max_threshold := to_number(max_threshold_str) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": ""}] { + score := input.metadata.compliance_score + score > min_threshold + score <= max_threshold + msg := sprintf("%v Scan failed for cluster %v as Compliance Score was found to be %v which is below threshold %v.", [input.metadata.scan_type, input.metadata.account_name, score, max_threshold]) + sugg := input.metadata.suggestion + }`, + + 257: ` + package opsmx + + condition_value := input.conditions[0].condition_value + min_threshold_str := split(condition_value, "-")[0] + max_threshold_str := split(condition_value, "-")[1] + min_threshold := to_number(min_threshold_str) + max_threshold := to_number(max_threshold_str) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": ""}] { + score := input.metadata.compliance_score + score > min_threshold + score <= max_threshold + msg := sprintf("%v Scan failed for cluster %v as Compliance Score was found to be %v which is below threshold %v.", [input.metadata.scan_type, input.metadata.account_name, score, max_threshold]) + sugg := sprintf("Implement best practices as mentioned in %v to improve overall compliance score.", [input.metadata.references]) + }`, + + 258: ` + package opsmx + + condition_value := input.conditions[0].condition_value + min_threshold_str := split(condition_value, "-")[0] + max_threshold_str := split(condition_value, "-")[1] + min_threshold := to_number(min_threshold_str) + max_threshold := to_number(max_threshold_str) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": ""}] { + score := input.metadata.compliance_score + score > min_threshold + score <= max_threshold + msg := sprintf("%v Scan failed for cluster %v as Compliance Score was found to be %v which is below threshold %v.", [input.metadata.scan_type, input.metadata.account_name, score, max_threshold]) + sugg := sprintf("Implement best 
practices as mentioned in %v to improve overall compliance score.", [input.metadata.references]) + }`, + + 259: ` + package opsmx + + condition_value := input.conditions[0].condition_value + min_threshold_str := split(condition_value, "-")[0] + max_threshold_str := split(condition_value, "-")[1] + min_threshold := to_number(min_threshold_str) + max_threshold := to_number(max_threshold_str) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": ""}] { + score := input.metadata.compliance_score + score > min_threshold + score <= max_threshold + msg := sprintf("%v Scan failed for cluster %v as Compliance Score was found to be %v which is below threshold %v.", [input.metadata.scan_type, input.metadata.account_name, score, max_threshold]) + sugg := sprintf("Implement best practices as mentioned in %v to improve overall compliance score.", [input.metadata.references]) + }`, + + 260: ` + package opsmx + + condition_value := input.conditions[0].condition_value + min_threshold_str := split(condition_value, "-")[0] + max_threshold_str := split(condition_value, "-")[1] + min_threshold := to_number(min_threshold_str) + max_threshold := to_number(max_threshold_str) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": ""}] { + score := input.metadata.compliance_score + score > min_threshold + score <= max_threshold + msg := sprintf("%v Scan failed for cluster %v as Compliance Score was found to be %v which is below threshold %v.", [input.metadata.scan_type, input.metadata.account_name, score, max_threshold]) + sugg := sprintf("Implement best practices as mentioned in %v to improve overall compliance score.", [input.metadata.references]) + }`, + + 261: ` + package opsmx + import future.keywords.in + + default allow = false + default auto_merge_config = "" + + request_components = [input.metadata.ssd_secret.github.rest_api_url,"repos", input.metadata.owner, input.metadata.repository] + request_url = concat("/",request_components) + + token = 
input.metadata.ssd_secret.github.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + auto_merge_config = response.body.allow_auto_merge + status_code = response.status_code + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "Unauthorized to check the Branch Protection Policy" + error := "401 Unauthorized" + sugg := "Kindly check the access token. It must have enough permissions to read the branch protection policy for repository." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 200, 301, 302] + not response.status_code in codes + msg = "Unable to fetch Branch Protection Policy" + error = sprintf("Error %v:%v receieved from Github upon trying to fetch Branch Protection Policy.", [status_code, response.body.message]) + sugg = "Kindly check Github API is reachable and the provided access token has required permissions." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + status_code in [200, 301, 302] + auto_merge_config == "" + msg = "Auto Merge Config Not Found, indicates Branch Protection Policy is not set" + error = "" + sugg = "Kindly configure Branch Protection Policy for source code repository and make sure to restrict auto merge." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + status_code in [200, 301, 302] + auto_merge_config != input.conditions[0].condition_value + msg = sprintf("Auto Merge is allowed in repo %v", [input.metadata.repository]) + error = "" + sugg = "Kindly restrict auto merge in Branch Protection Policy applied to repository." 
+ }`, + + 262: ` + package opsmx + input_stages = input.metadata.stages + manualJudgment_stages = [input.metadata.stages[i] | input.metadata.stages[i].type == "manualJudgment"] + counter = count(manualJudgment_stages) + deny["No manual judgement stages configured in pipeline"]{ + count(manualJudgment_stages) < 1 + }`, + + 263: ` + package opsmx + + default allow = false + + repo_search = [input.metadata.ssd_secret.github.rest_api_url,"repos", input.metadata.github_org, input.metadata.github_repo] + repo_searchurl = concat("/",repo_search) + + branch_search = [input.metadata.ssd_secret.github.rest_api_url,"repos", input.metadata.github_org, input.metadata.github_repo,"branches",input.metadata.default_branch] + branch_searchurl = concat("/",branch_search) + + protect_components = [input.metadata.ssd_secret.github.rest_api_url,"repos", input.metadata.github_org, input.metadata.github_repo,"branches",input.metadata.default_branch,"protection"] + protect_url = concat("/",protect_components) + + token = input.metadata.ssd_secret.github.token + + repo_search_request = { + "method": "GET", + "url": repo_searchurl, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + branch_search_request = { + "method": "GET", + "url": branch_searchurl, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + protect_search_request = { + "method": "GET", + "url": protect_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(repo_search_request) + + branch_response = http.send(branch_search_request) + + branch_protect = http.send(protect_search_request) + + branch_check = response.body.default_branch + + AllowAutoMerge = response.body.allow_auto_merge + + delete_branch_on_merge = response.body.delete_branch_on_merge + + branch_protected = branch_response.body.protected + + RequiredReviewers = branch_protect.body.required_pull_request_reviews.required_approving_review_count + + AllowForcePushes = 
branch_protect.body.allow_force_pushes.enabled + + AllowDeletions = branch_response.body.allow_deletions.enabled + + RequiredSignatures = branch_protect.body.required_signatures.enabled + + EnforceAdmins = branch_protect.body.enforce_admins.enabled + + RequiredStatusCheck = branch_protect.body.required_status_checks.strict + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + branch_check = " " + msg := "Github does not have any branch" + sugg := "Please create a branch" + error := "" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + AllowAutoMerge = true + msg := sprintf("The Auto Merge is enabled for the %s owner %s repo", [input.metadata.github_repo, input.metadata.default_branch]) + sugg := "Please disable the Auto Merge" + error := "" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + delete_branch_on_merge = true + msg := "The branch protection policy that allows branch deletion is enabled." + sugg := sprintf("Please disable the branch deletion of branch %s of repo %s", [input.metadata.default_branch,input.metadata.github_repo]) + error := "" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + branch_protected = false + msg := sprintf("Github repo %v and branch %v is not protected", [input.metadata.github_repo, input.metadata.default_branch]) + sugg := sprintf("Make sure branch %v of %v repo has some branch policies", [input.metadata.github_repo,input.metadata.default_branch]) + error := "" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + RequiredReviewers = 0 + msg := "The branch protection policy that mandates the minimum review for branch protection has been deactivated." 
+ sugg := sprintf("Activate branch protection: pull request and minimum 1 approval before merging for branch %s of %s repo",[input.metadata.default_branch,input.metadata.github_repo]) + error := "" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + AllowForcePushes = true + msg := "The branch protection policy that allows force pushes is enabled." + sugg := sprintf("Please disable force push of branch %v of repo %v", [input.metadata.default_branch,input.metadata.github_repo]) + error := "" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + AllowDeletions = true + msg := "The branch protection policy that allows branch deletion is enabled." + sugg := sprintf("Please disable the branch deletion of branch %v of repo %v",[input.metadata.default_branch,input.metadata.github_repo]) + error := "" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + RequiredSignatures = true + msg := "The branch protection policy that requires signature is disabled." 
+ sugg := sprintf("Please activate the mandatory GitHub signature policy for branch %v signatures of %v repo",[input.metadata.default_branch,input.metadata.github_repo]) + error := "" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + EnforceAdmins = true + msg := sprintf("The branch protection policy that enforces status checks for repository administrators is disabled", [input.metadata.github_repo]) + sugg := sprintf("Please activate the branch protection policy, dont by pass status checks for repository administrators of branch %s of %s repo",[input.metadata.default_branch,input.metadata.github_repo]) + error := "" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + RequiredStatusCheck = true + msg := sprintf("The branch protection policy that requires status check is disabled for the repo %s", [input.metadata.github_repo]) + sugg := sprintf("Please activate the branch protection policy, requiring a need to be up-to-date with the base branch before merging for branch %s of %s repo",[input.metadata.default_branch,input.metadata.github_repo]) + error := "" + }`, + + 264: ` + package opsmx + import future.keywords.in + default approved_servers_count = 0 + default list_approved_user_str = [] + + list_approved_user_str = {input.metadata.ssd_secret.build_access_config.credentials[i].approved_user | split(input.metadata.ssd_secret.build_access_config.credentials[i].url, "/")[2] == build_url} + list_approved_users = split(list_approved_user_str[_], ",") + approved_servers_count = count(input.metadata.ssd_secret.build_access_config.credentials) + build_url = split(input.metadata.build_url, "/")[2] + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error }] { + approved_servers_count == 0 + msg:="" + sugg:="Set the BuildAccessConfig.Credentials parameter with trusted build server URLs and users to strengthen artifact validation during the deployment process." 
+ error:="The essential list of approved build URLs and users remains unspecified." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error }]{ + count(input.metadata.ssd_secret.build_access_config.credentials) > 0 + list_approved_user_str == [] + msg := "" + sugg := "Set the BuildAccessConfig.Credentials parameter with trusted build server URLs and users to strengthen artifact validation during the deployment process." + error := "The essential list of approved build users remains unspecified." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error }]{ + count(input.metadata.ssd_secret.build_access_config.credentials) > 0 + not input.metadata.build_user in list_approved_users + msg:="The artifact has not been sourced from an approved user.\nPlease verify the artifacts origin." + sugg:="Ensure the artifact is sourced from an approved user." + error:="" + }`, + + 265: ` + package opsmx + import future.keywords.in + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + input.metadata.parent_repo != "" + parent_repo_owner = split(input.metadata.parent_repo, "/")[0] + parent_repo_owner != input.metadata.owner + msg := sprintf("The pipeline uses a forked repo from a different organization %s from %s.", [input.metadata.parent_repo, input.metadata.owner]) + sugg := "Refrain from running pipelines originating from forked repos not belonging to the same organization." 
+ error := "" + }`, + + 266: ` + package opsmx + import future.keywords.in + + default allow = false + + maintainers_url = concat("/", [input.metadata.ssd_secret.github.rest_api_url, "repos", input.metadata.owner, input.metadata.repository, "collaborators?permission=maintain&per_page=100"]) + admins_url = concat("/", [input.metadata.ssd_secret.github.rest_api_url, "repos", input.metadata.owner, input.metadata.repository, "collaborators?permission=admin&per_page=100"]) + + maintainers_request = { + "method": "GET", + "url": maintainers_url, + "headers": { + "Authorization": sprintf("Bearer %v", [input.metadata.ssd_secret.github.token]), + }, + } + + default maintainers_response = "" + maintainers_response = http.send(maintainers_request) + maintainers = [maintainers_response.body[i].login | maintainers_response.body[i].type == "User"] + + admins_request = { + "method": "GET", + "url": admins_url, + "headers": { + "Authorization": sprintf("Bearer %v", [input.metadata.ssd_secret.github.token]), + }, + } + + default admins_response = "" + admins_response = http.send(admins_request) + + admins = [admins_response.body[i].login | admins_response.body[i].type == "User"] + non_admin_maintainers = [maintainers[idx] | not maintainers[idx] in admins] + complete_list = array.concat(admins, non_admin_maintainers) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + maintainers_response.status_code == 401 + msg := "" + error := "401 Unauthorized: Unauthorized to check repository collaborators." + sugg := "Kindly check the access token. It must have enough permissions to get repository collaborators." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + admins_response.status_code == 401 + msg := "" + error := "401 Unauthorized: Unauthorized to check repository collaborators." + sugg := "Kindly check the access token. It must have enough permissions to get repository collaborators." 
+ } + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + maintainers_response.status_code == 404 + msg := "" + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository collaborators." + error := "Mentioned branch for Repository not found while trying to fetch repository collaborators. Repo name or Organisation is incorrect." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + admins_response.status_code == 404 + msg := "" + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository collaborators." + error := "Mentioned branch for Repository not found while trying to fetch repository collaborators. Repo name or Organisation is incorrect." + } + + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + admins_response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "GitHub is not reachable." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + maintainers_response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "GitHub is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 301, 302] + not admins_response.status_code in codes + msg := "" + error := sprintf("Unable to fetch repository collaborators. Error %v:%v receieved from Github.", [admins_response.status_code, admins_response.body.message]) + sugg := "Kindly check Github API is reachable and the provided access token has required permissions." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 301, 302] + not maintainers_response.status_code in codes + msg := "" + error := sprintf("Unable to fetch repository collaborators. 
Error %v:%v receieved from Github.", [maintainers_response.status_code, maintainers_response.body.message]) + sugg := "Kindly check Github API is reachable and the provided access token has required permissions." + } + + default denial_list = false + + denial_list = matched_users + + matched_users[user] { + users := complete_list + user := users[_] + patterns := ["bot", "auto", "test", "jenkins", "drone", "github", "gitlab", "aws", "azure"] + some pattern in patterns + regex.match(pattern, user) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}] { + counter := count(denial_list) + counter > 0 + denial_list_str := concat(", ", denial_list) + msg := sprintf("Maintainer and Admin access of Github Repository providing ability to merge code is granted to bot users. Number of bot users having permissions to merge: %v. Name of bots having permissions to merge: %v", [counter, denial_list_str]) + sugg := sprintf("Adhere to the company policy and revoke access of bot user for %v/%v Repository.", [input.metadata.repository,input.metadata.owner]) + error := "" + }`, + + 267: ` + package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.github.rest_api_url,"repos", input.metadata.owner, input.metadata.repository, "collaborators"] + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.github.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "" + error := "401 Unauthorized: Unauthorized to check repository collaborators." + sugg := "Kindly check the access token. It must have enough permissions to get repository collaborators." 
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ response.status_code == 404
+ msg := ""
+ sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository collaborators."
+ error := "Mentioned branch for Repository not found while trying to fetch repository collaborators. Repo name or Organisation is incorrect."
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ response.status_code == 500
+ msg := "Internal Server Error."
+ sugg := ""
+ error := "GitHub is not reachable."
+ }
+
+ deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{
+ codes = [401, 404, 500, 200, 301, 302]
+ not response.status_code in codes
+ msg := ""
+ error := sprintf("Unable to fetch repository collaborators. Error %v:%v received from Github.", [response.status_code, response.body.message])
+ sugg := "Kindly check Github API is reachable and the provided access token has required permissions."
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ admins = [response.body[i].login | response.body[i].role_name == "admin"]
+ # count the full collaborator list; count(response.body[i]) with an unbound i was a bug
+ total_users = count(response.body)
+ admin_users = count(admins)
+ admin_percentage = admin_users / total_users * 100
+
+ admin_percentage > 5
+ msg := sprintf("More than 5 percentage of total collaborators of %v github repository have admin access", [input.metadata.repository])
+ sugg := sprintf("Adhere to the company policy and revoke admin access to some users of the repo %v", [input.metadata.repository])
+ error := ""
+ }`,
+
+ 268: `package opsmx
+ token = input.metadata.github_access_token
+ request_components = [input.metadata.rest_url,"repos", input.metadata.github_org, input.metadata.github_repo, "activity?time_period=quarter&activity_type=push&per_page=500"]
+
+ collaborators_components = [input.metadata.rest_url,"repos", input.metadata.github_org, input.metadata.github_repo, "collaborators"]
+ collaborators_url = concat("/",collaborators_components)
+ + collaborators = { + "method": "GET", + "url": collaborators_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + coll_resp = http.send(collaborators) + + responsesplit = coll_resp.body + + coll_users = {coluser | + some i + coluser = responsesplit[i]; + coluser.role_name != "admin" + coluser.type == "User" + } + + request_url = concat("/",request_components) + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + resp = http.send(request) + link_1 = split(resp.headers.link[0], " ")[0] + decoded_link_1 = replace(link_1, "\u003e;", "") + decoded_link_2 = replace(decoded_link_1, "\u003c", "") + link_request = { + "method": "GET", + "url": decoded_link_2, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + resp2 = http.send(link_request) + + evnt_users = resp.body + + evnt_logins = {user | + some i + user = evnt_users[i]; + user.actor.type == "User" + } + + login_values[login] { + user = evnt_logins[_] + login = user.actor.login + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + allusers = coll_users[_] + eventlogins = evnt_logins[_] + allusers.login == login_values[_] + msg := sprintf("Access of Github repository %s has been granted to users %v who have no activity from last three months", [input.metadata.github_repo,login_values[_]]) + sugg := "Adhere to the company policy and revoke access of inactive members" + error := "" + }`, + + 269: ` + package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.github.rest_api_url,"repos", input.metadata.owner, input.metadata.repository,"dependency-graph/sbom"] + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.github.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = 
http.send(request) + + allow { + response.status_code = 200 + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + error := "Unauthorized to check repository configuration due to Bad Credentials." + msg := "" + sugg := "Kindly check the access token. It must have enough permissions to get repository configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + error := "Repository not found or SBOM could not be fetched." + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository configuration. Also, kindly verify if dependency tracking is enabled for the repository." + msg := "" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "GitHub is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 301, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v:%v receieved from Github upon trying to fetch Repository Configuration.", [response.status_code, response.body.message]) + sugg := "Kindly check Github API is reachable and the provided access token has required permissions." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + response.body.sbom = "" + error := sprintf("The SBOM could not be fetched, hence Centralized package manager settings Policy cannot be validated.", [input.metadata.repository]) + sugg := "Please make sure there are some packages in the GitHub Repository." 
+ msg := "" + } + + default pkg_without_version = [] + + pkg_without_version = [pkg2.name | pkg2 := response.body.sbom.packages[_] + pkg2.name != response.body.sbom.name + not startswith(pkg2.name, "actions:") + pkg2.versionInfo == ""] + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + count(pkg_without_version) != 0 + msg := sprintf("The GitHub repository %v/%v exhibits packages with inadequate versioning.", [input.metadata.owner, input.metadata.repository]) + sugg := sprintf("Adhere to the company policy and mandate proper tagging and versioning for packages of %v/%v repository.", [input.metadata.owner, input.metadata.repository]) + error := "" + }`, + + 270: ` + package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.github.rest_api_url,"repos", input.metadata.owner, input.metadata.repository,"dependency-graph/sbom"] + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.github.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + allow { + response.status_code = 200 + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "Unauthorized to check repository configuration due to Bad Credentials." + error := "401 Unauthorized." + sugg := "Kindly check the access token. It must have enough permissions to get repository configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "Repository SBOM not found while trying to fetch Repository Configuration." + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository configuration. Also, check if dependency mapping is enabled." 
+ error := "" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "GitHub is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 301, 302] + not response.status_code in codes + msg := "Unable to fetch repository configuration." + error := sprintf("Error %v:%v receieved from Github upon trying to fetch Repository Configuration.", [response.status_code, response.body.message]) + sugg := "Kindly check Github API is reachable and the provided access token has required permissions." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + response.body.sbom = "" + error := sprintf("The SBOM could not be fetched, hence Centralized package manager settings Policy cannot be validated.", [input.metadata.repository]) + sugg := "Please make sure there are some packages in the GitHub Repository." + msg := "" + } + + default_pkg_list = [] + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + pkg_list = [pkg.name | pkg := response.body.sbom.packages[_] + pkg.name != response.body.sbom.name + not startswith(pkg.name, "actions:")] + + count(pkg_list) == 0 + msg := sprintf("The GitHub repository %v/%v lacks the necessary configuration files for package managers.", [input.metadata.owner, input.metadata.repository]) + sugg := sprintf("Adhere to the company policy and consider adding the necessary package manager configuration files to the GitHub repository %v/%v.", [input.metadata.owner, input.metadata.repository]) + error := "" + }`, + + 271: ` + package opsmx + + import data.strings + + body := { + "image": input.metadata.image, + "imageTag": input.metadata.image_tag, + "username": input.metadata.ssd_secret.docker.username, + "password": input.metadata.ssd_secret.docker.password + } + + request_url = concat("",[input.metadata.toolchain_addr, "/api", "/v1", "/artifactSign"]) + + request = { 
+ "method": "POST",
+ "url": request_url,
+ "body": body
+ }
+
+ response = http.send(request)
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ response.body.code == 500
+ msg = sprintf("Artifact %v:%v is not a signed artifact. Kindly verify authenticity of the artifact and its source.",[input.metadata.image, input.metadata.image_tag])
+ sugg := ""
+ error := ""
+ }`,
+
+ 272: `
+ package opsmx
+
+ import data.strings
+ # NOTE: no package-level default for signed_image_sha here; the value is bound
+ # locally inside the deny rule below. A package-level rule of the same name
+ # would unify against that local binding and break the SHA comparison.
+
+ body := {
+ "image": input.metadata.image,
+ "imageTag": input.metadata.image_tag,
+ "username": input.metadata.ssd_secret.docker.username,
+ "password": input.metadata.ssd_secret.docker.password
+ }
+
+ request_url = concat("",[input.metadata.toolchain_addr, "/api", "/v1", "/artifactSign"])
+
+ request = {
+ "method": "POST",
+ "url": request_url,
+ "body": body
+ }
+
+ response = http.send(request)
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ response.body.code == 500
+ msg = sprintf("Artifact %v:%v is not a signed artifact. Kindly verify authenticity of the artifact and its source.",[input.metadata.image, input.metadata.image_tag])
+ sugg := ""
+ error := ""
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ response.status_code == 200
+ signed_image_sha = response.body.imageSha
+ signed_image_sha != input.metadata.image_sha
+ msg := "Artifact SHA deployed in Cloud does not match with Signed Artifact SHA."
+ sugg :="Kindly check the artifact deployed in cloud."
+ error := "" + }`, + + 273: `sample script`, + + 274: ` + package opsmx + + default secrets_count = 0 + + request_url = concat("/",[input.metadata.toolchain_addr,"api", "v1", "scanResult?fileName="]) + filename_components = ["CodeScan", input.metadata.owner, input.metadata.repository, input.metadata.build_id, "codeScanResult.json"] + filename = concat("_", filename_components) + + complete_url = concat("", [request_url, filename, "&scanOperation=codeSecretScan"]) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + + high_severity_secrets = [response.body.Results[0].Secrets[i].Title | response.body.Results[0].Secrets[i].Severity == "HIGH"] + secrets_count = count(high_severity_secrets) + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + secrets_count > 0 + + msg := sprintf("Secret found for %v/%v Github repository for branch %v.\nBelow are the secrets identified:\n %s", [input.metadata.owner, input.metadata.repository, input.metadata.branch, concat(",\n", high_severity_secrets)]) + sugg := "Eliminate the aforementioned sensitive information to safeguard confidential data." 
+ error := "" + }`, + + 275: ` + package opsmx + + default secrets_count = 0 + + request_url = concat("/",[input.metadata.toolchain_addr,"api", "v1", "scanResult?fileName="]) + filename_components = ["CodeScan", input.metadata.owner, input.metadata.repository, input.metadata.build_id, "codeScanResult.json"] + filename = concat("_", filename_components) + + complete_url = concat("", [request_url, filename, "&scanOperation=codeSecretScan"]) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + + critical_severity_secrets = [response.body.Results[0].Secrets[i].Title | response.body.Results[0].Secrets[i].Severity == "CRITICAL"] + secrets_count = count(critical_severity_secrets) + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + secrets_count > 0 + + msg := sprintf("Secret found for %v/%v Github repository for branch %v.\nBelow are the secrets identified:\n %s", [input.metadata.owner, input.metadata.repository, input.metadata.branch, concat(",\n", critical_severity_secrets)]) + sugg := "Eliminate the aforementioned sensitive information to safeguard confidential data." 
+ error := "" + }`, + + 276: ` + package opsmx + + default secrets_count = 0 + + request_url = concat("/",[input.metadata.toolchain_addr,"api", "v1", "scanResult?fileName="]) + filename_components = ["CodeScan", input.metadata.owner, input.metadata.repository, input.metadata.build_id, "codeScanResult.json"] + filename = concat("_", filename_components) + + complete_url = concat("", [request_url, filename, "&scanOperation=codeSecretScan"]) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + + medium_severity_secrets = [response.body.Results[0].Secrets[i].Title | response.body.Results[0].Secrets[i].Severity == "MEDIUM"] + secrets_count = count(medium_severity_secrets) + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + secrets_count > 0 + + msg := sprintf("Secret found for %v/%v Github repository for branch %v.\nBelow are the secrets identified:\n %s", [input.metadata.owner, input.metadata.repository, input.metadata.branch, concat(",\n", medium_severity_secrets)]) + sugg := "Eliminate the aforementioned sensitive information to safeguard confidential data." 
+ error := "" + }`, + + 277: ` + package opsmx + + default secrets_count = 0 + + request_url = concat("/",[input.metadata.toolchain_addr,"api", "v1", "scanResult?fileName="]) + filename_components = ["CodeScan", input.metadata.owner, input.metadata.repository, input.metadata.build_id, "codeScanResult.json"] + filename = concat("_", filename_components) + + complete_url = concat("", [request_url, filename, "&scanOperation=codeSecretScan"]) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + + low_severity_secrets = [response.body.Results[0].Secrets[i].Title | response.body.Results[0].Secrets[i].Severity == "LOW"] + secrets_count = count(low_severity_secrets) + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + secrets_count > 0 + + msg := sprintf("Secret found for %v/%v Github repository for branch %v.\nBelow are the secrets identified:\n %s", [input.metadata.owner, input.metadata.repository, input.metadata.branch, concat(",\n", low_severity_secrets)]) + sugg := "Eliminate the aforementioned sensitive information to safeguard confidential data." 
+ error := "" + }`, + + 278: ` + package opsmx + + default secrets_count = 0 + + default image_name = "" + + image_name = input.metadata.image { + not contains(input.metadata.image,"/") + } + image_name = split(input.metadata.image,"/")[1] { + contains(input.metadata.image,"/") + } + + request_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName="]) + image_sha = replace(input.metadata.image_sha, ":", "-") + filename_components = [image_sha, "imageSecretScanResult.json"] + filename = concat("-", filename_components) + + complete_url = concat("", [request_url, filename, "&scanOperation=imageSecretScan"]) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + + high_severity_secrets = [response.body.Results[0].Secrets[i].Title | response.body.Results[0].Secrets[i].Severity == "HIGH"] + secrets_count = count(high_severity_secrets) + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + secrets_count > 0 + + msg := sprintf("Secret found for Artifact %v:%v.\nBelow are the secrets identified:\n %v", [image_name, input.metadata.image_tag, concat(",\n", high_severity_secrets)]) + sugg := "Eliminate the aforementioned sensitive information to safeguard confidential data." 
+ error := "" + }`, + + 279: ` + package opsmx + + default secrets_count = 0 + + default image_name = "" + + image_name = input.metadata.image { + not contains(input.metadata.image,"/") + } + image_name = split(input.metadata.image,"/")[1] { + contains(input.metadata.image,"/") + } + + request_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName="]) + image_sha = replace(input.metadata.image_sha, ":", "-") + filename_components = [image_sha, "imageSecretScanResult.json"] + filename = concat("-", filename_components) + + complete_url = concat("", [request_url, filename, "&scanOperation=imageSecretScan"]) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + + critical_severity_secrets = [response.body.Results[0].Secrets[i].Title | response.body.Results[0].Secrets[i].Severity == "CRITICAL"] + secrets_count = count(critical_severity_secrets) + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + secrets_count > 0 + + msg := sprintf("Secret found for Artifact %v:%v.\nBelow are the secrets identified:\n %v", [image_name, input.metadata.image_tag, concat(",\n", critical_severity_secrets)]) + sugg := "Eliminate the aforementioned sensitive information to safeguard confidential data." 
+ error := "" + }`, + + 280: ` + package opsmx + + default secrets_count = 0 + + default image_name = "" + + image_name = input.metadata.image { + not contains(input.metadata.image,"/") + } + image_name = split(input.metadata.image,"/")[1] { + contains(input.metadata.image,"/") + } + + request_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName="]) + image_sha = replace(input.metadata.image_sha, ":", "-") + filename_components = [image_sha, "imageSecretScanResult.json"] + filename = concat("-", filename_components) + + complete_url = concat("", [request_url, filename, "&scanOperation=imageSecretScan"]) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + + medium_severity_secrets = [response.body.Results[0].Secrets[i].Title | response.body.Results[0].Secrets[i].Severity == "MEDIUM"] + secrets_count = count(medium_severity_secrets) + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + secrets_count > 0 + + msg := sprintf("Secret found for Artifact %v:%v.\nBelow are the secrets identified:\n %v", [image_name, input.metadata.image_tag, concat(",\n", medium_severity_secrets)]) + sugg := "Eliminate the aforementioned sensitive information to safeguard confidential data." 
+ error := "" + }`, + + 281: ` + package opsmx + + default secrets_count = 0 + + default image_name = "" + + image_name = input.metadata.image { + not contains(input.metadata.image,"/") + } + image_name = split(input.metadata.image,"/")[1] { + contains(input.metadata.image,"/") + } + + request_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName="]) + image_sha = replace(input.metadata.image_sha, ":", "-") + filename_components = [image_sha, "imageSecretScanResult.json"] + filename = concat("-", filename_components) + + complete_url = concat("", [request_url, filename, "&scanOperation=imageSecretScan"]) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + + low_severity_secrets = [response.body.Results[0].Secrets[i].Title | response.body.Results[0].Secrets[i].Severity == "LOW"] + secrets_count = count(low_severity_secrets) + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + secrets_count > 0 + + msg := sprintf("Secret found for Artifact %v:%v.\nBelow are the secrets identified:\n %v", [image_name, input.metadata.image_tag, concat(",\n", low_severity_secrets)]) + sugg := "Eliminate the aforementioned sensitive information to safeguard confidential data." 
+ error := ""
+ }`,
+
+ 282: `
+ package opsmx
+ default high_severities = []
+
+ default multi_alert = false
+ default exists_alert = false
+
+ exists_alert = check_if_high_alert_exists
+ multi_alert = check_if_multi_alert
+
+ check_if_high_alert_exists = exists_flag {
+ high_severities_counter = count(input.metadata.results[0].HighSeverity)
+ high_severities_counter > 0
+ exists_flag = true
+ }
+
+ # complete rule, not a zero-arg function: it is referenced as a plain term above
+ check_if_multi_alert = multi_flag {
+ high_severities_counter = count(input.metadata.results[0].HighSeverity)
+ high_severities_counter > 1
+ multi_flag = true
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error }]{
+ check_if_high_alert_exists
+ check_if_multi_alert
+
+ some i
+ rule = input.metadata.results[0].HighSeverity[i].RuleID
+ title = input.metadata.results[0].HighSeverity[i].Title
+ targets = concat(",\n", input.metadata.results[0].HighSeverity[i].TargetResources)
+ resolution = input.metadata.results[0].HighSeverity[i].Resolution
+ msg := sprintf("Rule ID: %v,\nTitle: %v. 
\nBelow are the sources of High severity:\n %v", [rule, title, targets])
+ sugg := resolution
+ error := ""
+ }`,
+
+ 283: `
+ package opsmx
+ default critical_severities = []
+
+ default multi_alert = false
+ default exists_alert = false
+
+ exists_alert = check_if_critical_alert_exists
+ multi_alert = check_if_multi_alert
+
+ check_if_critical_alert_exists = exists_flag {
+ critical_severities_counter = count(input.metadata.results[0].CriticalSeverity)
+ critical_severities_counter > 0
+ exists_flag = true
+ }
+
+ # complete rule, not a zero-arg function: it is referenced as a plain term above
+ check_if_multi_alert = multi_flag {
+ critical_severities_counter = count(input.metadata.results[0].CriticalSeverity)
+ critical_severities_counter > 1
+ multi_flag = true
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error }]{
+ check_if_critical_alert_exists
+ check_if_multi_alert
+
+ some i
+ rule = input.metadata.results[0].CriticalSeverity[i].RuleID
+ title = input.metadata.results[0].CriticalSeverity[i].Title
+ targets = concat(",\n", input.metadata.results[0].CriticalSeverity[i].TargetResources)
+ resolution = input.metadata.results[0].CriticalSeverity[i].Resolution
+ msg := sprintf("Rule ID: %v,\nTitle: %v. 
\nBelow are the sources of critical severity:\n %v", [rule, title, targets])
+ sugg := resolution
+ error := ""
+ }`,
+
+ 284: `
+ package opsmx
+ default medium_severities = []
+
+ default multi_alert = false
+ default exists_alert = false
+
+ exists_alert = check_if_medium_alert_exists
+ multi_alert = check_if_multi_alert
+
+ check_if_medium_alert_exists = exists_flag {
+ medium_severities_counter = count(input.metadata.results[0].MediumSeverity)
+ medium_severities_counter > 0
+ exists_flag = true
+ }
+
+ # complete rule, not a zero-arg function: it is referenced as a plain term above
+ check_if_multi_alert = multi_flag {
+ medium_severities_counter = count(input.metadata.results[0].MediumSeverity)
+ medium_severities_counter > 1
+ multi_flag = true
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error }]{
+ check_if_medium_alert_exists
+ check_if_multi_alert
+
+ some i
+ rule = input.metadata.results[0].MediumSeverity[i].RuleID
+ title = input.metadata.results[0].MediumSeverity[i].Title
+ targets = concat(",\n", input.metadata.results[0].MediumSeverity[i].TargetResources)
+ resolution = input.metadata.results[0].MediumSeverity[i].Resolution
+ msg := sprintf("Rule ID: %v,\nTitle: %v. 
\nBelow are the sources of medium severity:\n %v", [rule, title, targets])
+ sugg := resolution
+ error := ""
+ }`,
+
+ 285: `
+ package opsmx
+ default low_severities = []
+
+ default multi_alert = false
+ default exists_alert = false
+
+ exists_alert = check_if_low_alert_exists
+ multi_alert = check_if_multi_alert
+
+ check_if_low_alert_exists = exists_flag {
+ low_severities_counter = count(input.metadata.results[0].LowSeverity)
+ low_severities_counter > 0
+ exists_flag = true
+ }
+
+ # complete rule, not a zero-arg function: it is referenced as a plain term above
+ check_if_multi_alert = multi_flag {
+ low_severities_counter = count(input.metadata.results[0].LowSeverity)
+ low_severities_counter > 1
+ multi_flag = true
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error }]{
+ check_if_low_alert_exists
+ check_if_multi_alert
+
+ some i
+ rule = input.metadata.results[0].LowSeverity[i].RuleID
+ title = input.metadata.results[0].LowSeverity[i].Title
+ targets = concat(",\n", input.metadata.results[0].LowSeverity[i].TargetResources)
+ resolution = input.metadata.results[0].LowSeverity[i].Resolution
+ msg := sprintf("Rule ID: %v,\nTitle: %v. \nBelow are the sources of low severity:\n %v", [rule, title, targets])
+ sugg := resolution
+ error := ""
+ }`,
+
+ 286: `
+ package opsmx
+ import future.keywords.in
+
+ default allow = false
+ default private_repo = ""
+
+ request_url = concat("", [input.metadata.ssd_secret.gitlab.rest_api_url, "api/v4/projects/", input.metadata.gitlab_project_id])
+
+ token = input.metadata.ssd_secret.gitlab.token
+
+ request = {
+ "method": "GET",
+ "url": request_url,
+ "headers": {
+ "PRIVATE-TOKEN": sprintf("%v", [token]),
+ },
+ }
+
+ response = http.send(request)
+
+ allow {
+ response.status_code = 200
+ }
+
+ deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{
+ response.status_code == 401
+ msg := ""
+ error := "Unauthorized to check repository configuration due to Bad Credentials."
+ sugg := "Kindly check the access token. 
It must have enough permissions to get repository configurations."
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ response.status_code == 404
+ msg := ""
+ sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository configuration."
+ error := "Repository not found while trying to fetch Repository Configuration."
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ response.status_code == 500
+ msg := "Internal Server Error."
+ sugg := ""
+ error := "Gitlab is not reachable."
+ }
+
+ deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{
+ codes = [401, 404, 500, 200, 302]
+ not response.status_code in codes
+ msg := ""
+ error := sprintf("Error %v received from Gitlab upon trying to fetch Repository Configuration.", [response.body.message])
+ sugg := "Kindly check Gitlab API is reachable and the provided access token has required permissions."
+ }
+
+ deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+ response.body.visibility != "private"
+ msg := sprintf("Gitlab Project %v is publicly visible.", [input.metadata.repository])
+ sugg := "Kindly adhere to security standards and change the visibility of the repository to private."
+ error := "" + }`, + + 287: ` + package opsmx + import future.keywords.in + + default allow = false + default number_of_merges = 0 + default merges_unreviewed = [] + default merges_reviewed_by_bots = [] + default merges_reviewed_by_author = [] + + request_url = concat("", [input.metadata.ssd_secret.gitlab.rest_api_url,"api/v4/projects/", input.metadata.gitlab_project_id, "/merge_requests?state=merged&order_by=created_at"]) + + token = input.metadata.ssd_secret.gitlab.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "PRIVATE-TOKEN": sprintf("%v", [token]), + }, + } + + response = http.send(request) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "" + error := "Unauthorized to check repository branch protection policy configuration due to Bad Credentials." + sugg := "Kindly check the access token. It must have enough permissions to get repository branch protection policy configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository branch protection policy configuration." + error := "Mentioned branch for Repository not found while trying to fetch repository branch protection policy configuration." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "Gitlab is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved from Gitlab upon trying to fetch Repository Configuration.", [response.body.message]) + sugg := "Kindly check Gitlab API is reachable and the provided access token has required permissions." 
+ } + + number_of_merges = count(response.body) + merges_unreviewed = [response.body[i].iid | count(response.body[i].reviewers) == 0] + merges_reviewed_by_bots = [response.body[i].iid | contains(response.body[i].reviewers[j].username, "bot")] + merges_reviewed_by_author = [response.body[i].iid | response.body[i].reviewers[j].username == response.body[i].author.username] + + deny[{"alertMsg": msg, "error": error, "suggestion": sugg}]{ + count(merges_reviewed_by_bots) > 0 + msg := sprintf("Merge Request with bot user as reviewer found. Merge Request ID: %v.",[merges_reviewed_by_bots]) + sugg := "Adhere to security standards by restricting reviews by bot users." + error := "" + } + + deny[{"alertMsg": msg, "error": error, "suggestion": sugg}]{ + count(merges_reviewed_by_author) > 0 + msg := sprintf("Merge Request with Author as reviewer found. Merge Request ID: %v.",[merges_reviewed_by_author]) + sugg := "Adhere to security standards by restricting reviews by authors." + error := "" + } + + deny[{"alertMsg": msg, "error": error, "suggestion": sugg}]{ + count(merges_unreviewed) > 0 + msg := sprintf("Unreviewed Merge Requests found to be merged. Merge Request ID: %v.",[merges_unreviewed]) + sugg := "Adhere to security standards by restricting merges without reviews." 
+ error := "" + }`, + + 288: ` + package opsmx + import future.keywords.in + + default allow = false + + request_url = concat("", [input.metadata.ssd_secret.gitlab.rest_api_url,"api/v4/projects/", input.metadata.gitlab_project_id, "/repository/branches/", input.metadata.branch]) + + token = input.metadata.ssd_secret.gitlab.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "PRIVATE-TOKEN": sprintf("%v", [token]), + }, + } + + response = http.send(request) + + allow { + response.status_code = 200 + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "" + error := "Unauthorized to check repository branch protection policy configuration due to Bad Credentials." + sugg := "Kindly check the access token. It must have enough permissions to get repository branch protection policy configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository branch protection policy configuration." + error := "Mentioned branch for Repository not found while trying to fetch repository branch protection policy configuration." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "Gitlab is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved from Gitlab upon trying to fetch Repository Configuration.", [response.body.message]) + sugg := "Kindly check Gitlab API is reachable and the provided access token has required permissions." 
+ } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code in [200] + response.body.protected == false + msg := sprintf("Branch %v of Gitlab repository %v is not protected by a branch protection policy.", [input.metadata.branch, input.metadata.repository]) + sugg := sprintf("Adhere to the company policy by enforcing Branch Protection Policy for branches of %v Gitlab repository.",[input.metadata.repository]) + error := "" + }`, + + 289: ` + package opsmx + + import future.keywords.in + request_url = concat("", [input.metadata.ssd_secret.gitlab.rest_api_url,"api/v4/projects/", input.metadata.gitlab_project_id, "/members"]) + + token = input.metadata.ssd_secret.gitlab.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "PRIVATE-TOKEN": sprintf("%v", [token]), + }, + } + + response = http.send(request) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "" + error := "Unauthorized to check repository members due to Bad Credentials." + sugg := "Kindly check the access token. It must have enough permissions to get repository members." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository members." + error := "Mentioned branch for Repository not found while trying to fetch repository members." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "Gitlab is not reachable." 
+ } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved from Gitlab upon trying to fetch Repository Configuration.", [response.body.message]) + sugg := "Kindly check Gitlab API is reachable and the provided access token has required permissions." + } + + default denial_list = false + + denial_list = matched_users + + matched_users[user] { + users := [response.body[i].username | response.body[i].access_level == 50] + user := users[_] + patterns := ["bot", "auto", "test", "jenkins", "drone", "github", "gitlab", "aws", "azure"] + some pattern in patterns + regex.match(pattern, user) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}] { + counter := count(denial_list) + counter > 0 + denial_list_str := concat(", ", denial_list) + msg := sprintf("Owner access of Gitlab Repository is granted to bot users. \n Number of bot users having owner access: %v. \n Name of bots having owner access: %v", [counter, denial_list_str]) + sugg := sprintf("Adhere to the company policy and revoke access of bot user for %v/%v Repository.", [input.metadata.repository,input.metadata.owner]) + error := "" + }`, + + 290: ` + package opsmx + import future.keywords.in + + default allow = false + + request_url = concat("", [input.metadata.ssd_secret.gitlab.rest_api_url,"api/v4/projects/", input.metadata.gitlab_project_id, "/repository/files/SECURITY.md?ref=", input.metadata.branch]) + + token = input.metadata.ssd_secret.gitlab.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "PRIVATE-TOKEN": sprintf("%v", [token]), + }, + } + + response = http.send(request) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "" + error := "Unauthorized to check repository branch protection policy configuration due to Bad Credentials." + sugg := "Kindly check the access token. 
It must have enough permissions to get repository branch protection policy configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := sprintf("SECURITY.md file not found in branch %v of repository %v.", [input.metadata.branch, input.metadata.repository]) + sugg := "Adhere to security standards and configure SECURITY.md file in the repository." + error := "" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "Gitlab is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved from Gitlab upon trying to fetch Repository Configuration.", [response.body.message]) + sugg := "Kindly check Gitlab API is reachable and the provided access token has required permissions." + }`, + + 291: ` + package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.gitlab.rest_api_url,"api/v4/user"] + + request_url = concat("",request_components) + + token = input.metadata.ssd_secret.gitlab.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "PRIVATE-TOKEN": sprintf("%v", [token]), + }, + } + + response = http.send(request) + + allow { + response.status_code = 200 + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "" + error := "Unauthorized to check repository branch protection policy configuration due to Bad Credentials." + sugg := "Kindly check the access token. It must have enough permissions to get repository branch protection policy configurations." 
+ } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository branch protection policy configuration." + error := "Mentioned branch for Repository not found while trying to fetch repository branch protection policy configuration." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "Gitlab is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved from Gitlab upon trying to fetch Repository Configuration.", [response.body.message]) + sugg := "Kindly check Gitlab API is reachable and the provided access token has required permissions." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.two_factor_enabled == false + msg := sprintf("Gitlab Organisation %v doesnt have the mfa enabled.", [input.metadata.owner]) + sugg := sprintf("Adhere to the company policy by enabling 2FA for users of %s organisation.",[input.metadata.owner]) + error := "" + }`, + + 292: ` + package opsmx + import future.keywords.in + + default allow = false + + request_url = concat("", [input.metadata.ssd_secret.gitlab.rest_api_url,"api/v4/projects/", input.metadata.gitlab_project_id, "/hooks"]) + + token = input.metadata.ssd_secret.gitlab.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "PRIVATE-TOKEN": sprintf("%v", [token]), + }, + } + + response = http.send(request) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "" + error := "Unauthorized to check repository webhook configuration due to Bad Credentials." + sugg := "Kindly check the access token. 
It must have enough permissions to get webhook configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + sugg := "Kindly check if the repository provided is correct and the access token has rights to read webhook configuration." + error := "Mentioned branch for Repository not found while trying to fetch webhook configuration." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "Gitlab is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "" + error := sprintf("Error %v receieved from Gitlab upon trying to fetch Repository Configuration.", [response.body.message]) + sugg := "Kindly check Gitlab API is reachable and the provided access token has required permissions." + } + + default ssl_disabled_hooks = [] + ssl_disabled_hooks = [response.body[i].id | response.body[i].enable_ssl_verification == false] + + deny[{"alertMsg": msg, "error": error, "suggestion": sugg}]{ + count(ssl_disabled_hooks) > 0 + msg := sprintf("Webhook SSL Check failed: SSL/TLS not enabled for %v/%v repository.", [input.metadata.owner,input.metadata.repository]) + error := "" + sugg := sprintf("Adhere to the company policy by enabling the webhook ssl/tls for %v/%v repository.", [input.metadata.owner,input.metadata.repository]) + }`, + + 293: ``, + + 294: ``, + + 295: ` + package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.bitbucket.rest_api_url,"2.0/repositories", input.metadata.owner, input.metadata.repository] + + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.bitbucket.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } 
+
+    response = http.send(request)
+
+    allow {
+        response.status_code = 200
+    }
+
+    deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{
+        response.status_code == 401
+        msg := "Unauthorized to check repository configuration due to Bad Credentials."
+        error := "401 Unauthorized."
+        sugg := "Kindly check the access token. It must have enough permissions to get repository configurations."
+    }
+
+    deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+        response.status_code == 404
+        msg := "Repository not found while trying to fetch Repository Configuration."
+        sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository configuration."
+        error := "Repo name or Organisation is incorrect."
+    }
+
+    deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+        response.status_code == 500
+        msg := "Internal Server Error."
+        sugg := ""
+        error := "Bitbucket is not reachable."
+    }
+
+    deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{
+        codes = [401, 404, 500, 200, 301, 302]
+        not response.status_code in codes
+        msg := "Unable to fetch repository configuration."
+        error := sprintf("Error %v:%v received from Bitbucket upon trying to fetch Repository Configuration.", [response.status_code, response.body.message])
+        sugg := "Kindly check Bitbucket API is reachable and the provided access token has required permissions."
+    }
+
+    deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+        response.body.is_private = false
+        msg := sprintf("Bitbucket repository is a public repo %v.", [input.metadata.repository])
+        sugg := "Please change the repository visibility to private." 
+ error := "" + }`, + + 296: ` + package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.bitbucket.rest_api_url,"2.0/repositories", input.metadata.owner, "policies/branch-restrictions"] + + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.bitbucket.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + allow { + response.status_code = 200 + } + + abc = [user | + user = response.body.values[i]; + user.kind == "require_approvals_to_merge" + user.pattern = input.metadata.branch + ] + + reviewers = abc[_].value + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "Unauthorized to check repository branch protection policy configuration due to Bad Credentials." + error := "401 Unauthorized." + sugg := "Kindly check the access token. It must have enough permissions to get repository branch protection policy configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "Mentioned branch for Repository not found while trying to fetch repository branch protection policy configuration." + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository branch protection policy configuration." + error := "Repo name or Organisation is incorrect." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "Bitbucket is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 301, 302] + not response.status_code in codes + msg := "Unable to fetch repository branch protection policy configuration." 
+ error := sprintf("Error %v:%v receieved from Bitbucket upon trying to fetch repository branch protection policy configuration.", [response.status_code, response.body.message]) + sugg := "Kindly check Bitbucket API is reachable and the provided access token has required permissions." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + abc[_].value <= 1 + msg := sprintf("The branch protection policy that mandates a pull request before merging has mandatory reviewers count less than required for the %s branch of the Bitbucket", [input.metadata.branch]) + sugg := "Adhere to the company policy by establishing the correct minimum reviewers for Bitbucket" + error := "" + }`, + + 297: ` + package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.bitbucket.rest_api_url,"2.0/repositories", input.metadata.owner, input.metadata.repository, "branch-restrictions"] + + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.bitbucket.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + allow { + response.status_code = 200 + } + + branch_protect = [response.body.values[i].pattern | response.body.values[i].type == "branchrestriction"] + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "Unauthorized to check repository branch protection policy configuration due to Bad Credentials." + error := "401 Unauthorized." + sugg := "Kindly check the access token. It must have enough permissions to get repository branch protection policy configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "Mentioned branch for Repository not found while trying to fetch repository branch protection policy configuration." 
+ sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository branch protection policy configuration." + error := "Repo name or Organisation is incorrect." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "Bitbucket is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 301, 302] + not response.status_code in codes + msg := "Unable to fetch repository branch protection policy configuration." + error := sprintf("Error %v:%v receieved from Bitbucket upon trying to fetch repository branch protection policy configuration.", [response.status_code, response.body.message]) + sugg := "Kindly check Bitbucket API is reachable and the provided access token has required permissions." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + protect = branch_protect[_] + input.metadata.branch == protect + msg := sprintf("Branch %v of Bitbucket repository %v is protected by a branch protection policy.", [input.metadata.branch, input.metadata.repository]) + sugg := sprintf("Adhere to the company policy by enforcing Branch Protection Policy for branches of %v Bitbucket repository.",[input.metadata.repository]) + error := "" + }`, + + 298: ` + package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.bitbucket.rest_api_url,"2.0/repositories", input.metadata.owner, "policies/branch-restrictions"] + + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.bitbucket.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + allow { + response.status_code = 200 + } + + details = [ response.body.values[i].pattern | response.body.values[i].kind == 
"delete"] + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "Unauthorized to check repository branch protection policy configuration due to Bad Credentials." + error := "401 Unauthorized." + sugg := "Kindly check the access token. It must have enough permissions to get repository branch protection policy configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "Mentioned branch for Repository not found while trying to fetch repository branch protection policy configuration." + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository branch protection policy configuration." + error := "Repo name or Organisation is incorrect." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "Bitbucket is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 301, 302] + not response.status_code in codes + msg := "Unable to fetch repository branch protection policy configuration." + error := sprintf("Error %v:%v receieved from Bitbucket upon trying to fetch repository branch protection policy configuration.", [response.status_code, response.body.message]) + sugg := "Kindly check Bitbucket API is reachable and the provided access token has required permissions." 
+ } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + list = details[_] + input.metadata.branch == list + msg := sprintf("The branch protection policy that mandates branch %v cannot be deleted", [input.metadata.branch]) + sugg := "Adhere to the company policy branch cannot be deleted in Bitbucket" + error := "" + }`, + + 299: ` + package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.bitbucket.rest_api_url,"2.0/repositories", input.metadata.owner, input.metadata.repository, "branch-restrictions"] + + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.bitbucket.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + allow { + response.status_code = 200 + } + + admins= [response.body.values[i].users[_].display_name | response.body.values[i].kind == "restrict_merges"] + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "Unauthorized to check organisation configuration due to Bad Credentials." + error := "401 Unauthorized." + sugg := "Kindly check the access token. It must have enough permissions to get organisation configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "Mentioned Organisation not found while trying to fetch org configuration." + sugg := "Kindly check if the organisation provided is correct and the access token has rights to read organisation configuration." + error := "Organisation name is incorrect." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "Bitbucket is not reachable." 
+ } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "Unable to fetch organisation configuration." + error := sprintf("Error %v:%v receieved from Bitbucket upon trying to fetch organisation configuration.", [response.status_code, response.body.message]) + sugg := "Kindly check Bitbucket API is reachable and the provided access token has required permissions." + } + + default denial_list = false + + denial_list = matched_users + + matched_users[user] { + users := admins + user := users[_] + patterns := ["bot", "auto", "test", "jenkins", "drone", "github", "gitlab", "aws", "azure"] + some pattern in patterns + regex.match(pattern, user) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}] { + counter := count(denial_list) + counter > 0 + denial_list_str := concat(", ", denial_list) + msg := sprintf("Maintainer and Admin access of Bitbucket Repository providing ability to merge code is granted to bot users. Number of bot users having permissions to merge: %v. 
Name of bots having permissions to merge: %v", [counter, denial_list_str]) + sugg := sprintf("Adhere to the company policy and revoke access of bot user for %v/%v Repository.", [input.metadata.repository,input.metadata.owner]) + error := "" + }`, + + 300: ` + package opsmx + import future.keywords.in + default allow = false + request_components = [input.metadata.ssd_secret.bitbucket.rest_api_url,"2.0/workspaces", input.metadata.owner, "permissions/repositories",input.metadata.repository] + request_url = concat("/",request_components) + token = input.metadata.ssd_secret.bitbucket.token + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + response = http.send(request) + + allow { + response.status_code = 200 + } + + admins = [response.body.values[i].user.display_name| response.body.values[i].permission == "admin"] + + response = http.send(request) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "" + error := "401 Unauthorized: Unauthorized to check repository collaborators." + sugg := "Kindly check the access token. It must have enough permissions to get repository collaborators." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository collaborators." + error := "Mentioned branch for Repository not found while trying to fetch repository collaborators. Repo name or Organisation is incorrect." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "BitBucket is not reachable." 
+ } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 301, 302] + not response.status_code in codes + msg := "" + error := sprintf("Unable to fetch repository collaborators. Error %v:%v receieved from Bitbucket.", [response.status_code, response.body.message]) + sugg := "Kindly check Bitbucket API is reachable and the provided access token has required permissions." + } + + default denial_list = false + + denial_list = matched_users + + matched_users[user] { + users := admins + user := users[_] + patterns := ["bot", "auto", "test", "jenkins", "drone", "github", "gitlab", "aws", "azure"] + some pattern in patterns + regex.match(pattern, user) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}] { + counter := count(denial_list) + counter > 0 + denial_list_str := concat(", ", denial_list) + msg := sprintf("Admin access of Bitbucket Repository providing ability to merge code is granted to bot users. Number of bot users having permissions as repository admins: %v. 
Name of bots having permissions as repository admins: %v", [counter, denial_list_str]) + sugg := sprintf("Adhere to the company policy and revoke access of bot user for %v/%v Repository.", [input.metadata.repository,input.metadata.owner]) + error := "" + }`, + + 301: ` + package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.bitbucket.rest_api_url,"2.0/workspaces", input.metadata.owner, "permissions"] + + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.bitbucket.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + allow { + response.status_code = 200 + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "" + error := "401 Unauthorized: Unauthorized to check organisation members." + sugg := "Kindly check the access token. It must have enough permissions to get organisation members." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + sugg := "Kindly check if the repository provided is correct and the access token has rights to read organisation members." + error := "Mentioned branch for Repository not found while trying to fetch organisation members. Repo name or Organisation is incorrect." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "Bitbucket is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 301, 302] + not response.status_code in codes + msg := "" + error := sprintf("Unable to fetch organisation members. 
Error %v:%v receieved from Github.", [response.status_code, response.body.message]) + sugg := "Kindly check Bitbucket API is reachable and the provided access token has required permissions." + } + + admins = [response.body.values[i].user.display_name | response.body.values[i].permission == "owner"] + + default denial_list = false + + denial_list = matched_users + + matched_users[user] { + users := admins + user := users[_] + patterns := ["bot", "auto", "test", "jenkins", "drone", "github", "gitlab", "aws", "azure"] + some pattern in patterns + regex.match(pattern, user) + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}] { + counter := count(denial_list) + counter > 0 + denial_list_str := concat(", ", denial_list) + msg := sprintf("Owner access of Bitbucket Organization is granted to bot users. Number of bot users having owner access: %v. Name of bots having owner access: %v", [counter, denial_list_str]) + sugg := sprintf("Adhere to the company policy and revoke access of bot user for %v Organization.", [input.metadata.owner]) + error := "" + }`, + + 302: `package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.bitbucket.rest_api_url,"2.0/repositories", input.metadata.owner, "policies/branch-restrictions"] + + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.bitbucket.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + allow { + response.status_code = 200 + } + + auto_merge = [ response.body.values[i].pattern | response.body.values[i].kind == "allow_auto_merge_when_builds_pass"] + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "Unauthorized to check organisation configuration due to Bad Credentials." + error := "401 Unauthorized." + sugg := "Kindly check the access token. 
It must have enough permissions to get organisation configurations."
+    }
+
+    deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+        response.status_code == 404
+        msg := "Mentioned Organisation not found while trying to fetch org configuration."
+        sugg := "Kindly check if the organisation provided is correct and the access token has rights to read organisation configuration."
+        error := "Organisation name is incorrect."
+    }
+
+    deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{
+        response.status_code == 500
+        msg := "Internal Server Error."
+        sugg := ""
+        error := "Bitbucket is not reachable."
+    }
+
+    deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{
+        codes = [401, 404, 500, 200, 302]
+        not response.status_code in codes
+        msg := "Unable to fetch organisation configuration."
+        error := sprintf("Error %v:%v received from Bitbucket upon trying to fetch organisation configuration.", [response.status_code, response.body.message])
+        sugg := "Kindly check Bitbucket API is reachable and the provided access token has required permissions."
+    }
+
+    deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{
+        list = auto_merge[_]
+        input.metadata.branch == list
+        msg = sprintf("Auto Merge is allowed in repo %v of branch %v", [input.metadata.repository,input.metadata.branch])
+        error = ""
+        sugg = "Kindly restrict auto merge in Branch Protection Policy applied to repository." 
+ }`, + + 303: `package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.bitbucket.rest_api_url,"2.0/workspaces", input.metadata.owner, "permissions/repositories",input.metadata.repository] + + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.bitbucket.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + allow { + response.status_code = 200 + } + + admin = [entry | + entry = response.body.values[i]; + entry.type == "repository_permission" + entry.permission == "admin"] + + admin_users = count(admin) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "" + error := "Unauthorized to check repository branch protection policy configuration due to Bad Credentials." + sugg := "Kindly check the access token. It must have enough permissions to get repository branch protection policy configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository branch protection policy configuration." + error := "Mentioned branch for Repository not found while trying to fetch repository branch protection policy configuration." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "Bitbucket is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 301, 302] + not response.status_code in codes + msg := "Unable to fetch repository configuration." 
+ error := sprintf("Error %v:%v receieved from Github upon trying to fetch Repository Configuration.", [response.status_code, response.body.message]) + sugg := "Kindly check Bitbucket API is reachable and the provided access token has required permissions." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code in [200] + admin_users <= 1 + msg := sprintf("Organisation/Worskspace %v should have more than one owner so access to the code is not jeopardized",[input.metadata.owner,]) + sugg := "To reduce the attack surface it is recommended to have more than 1 admin of an organization or workspace" + error := "" + }`, + + 304: `package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.bitbucket.rest_api_url,"2.0/workspaces", input.metadata.owner, "permissions/repositories",input.metadata.repository] + + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.bitbucket.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + allow { + response.status_code = 200 + } + + #admin = [response.body.values[i] | response.body.values[i].type == "repository_permission" | response.body.values[i].permission == "admin"] + + admin = [user | + user = response.body.values[i]; + user.type == "repository_permission" + user.permission == "admin" + ] + + admin_users = count(admin) + + all = [user | + user = response.body.values[i]; + user.type == "repository_permission" + user.user.type == "user" + ] + + total_users = count(all) + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "" + error := "Unauthorized to check repository branch protection policy configuration due to Bad Credentials." + sugg := "Kindly check the access token. 
It must have enough permissions to get repository branch protection policy configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "" + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository branch protection policy configuration." + error := "Mentioned branch for Repository not found while trying to fetch repository branch protection policy configuration." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "Bitbucket is not reachable." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + admin_percentage = admin_users / total_users * 100 + + admin_percentage > 5 + msg := sprintf("More than 5 percentage of total collaborators of %v Bitbucket repository have admin access", [input.metadata.repository]) + sugg := sprintf("Adhere to the company policy and revoke admin access to some users of the repo %v", [input.metadata.repository]) + error := "" + }`, + + 305: `package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.bitbucket.rest_api_url,"2.0/repositories", input.metadata.owner, input.metadata.repository, "hooks"] + + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.bitbucket.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + allow { + response.status_code = 200 + } + + webhook = response.body.values + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "Unauthorized to check organisation configuration due to Bad Credentials." + error := "401 Unauthorized." + sugg := "Kindly check the access token. 
It must have enough permissions to get organisation configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "Mentioned Organisation not found while trying to fetch org configuration." + sugg := "Kindly check if the organisation provided is correct and the access token has rights to read organisation configuration." + error := "Organisation name is incorrect." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "Bitbucket is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "Unable to fetch organisation configuration." + error := sprintf("Error %v:%v receieved from Bitbucket upon trying to fetch organisation configuration.", [response.status_code, response.body.message]) + sugg := "Kindly check Bitbucket API is reachable and the provided access token has required permissions." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + count(webhook) == 0 + msg = sprintf("Webhooks is not present for the repo %v", [input.metadata.repository]) + error = "" + sugg = "Kindly enable webhooks for the repository." 
+ }`, + + 306: `package opsmx + import future.keywords.in + + default allow = false + + request_components = [input.metadata.ssd_secret.bitbucket.rest_api_url,"2.0/repositories", input.metadata.owner, input.metadata.repository, "hooks"] + + request_url = concat("/",request_components) + + token = input.metadata.ssd_secret.bitbucket.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + allow { + response.status_code = 200 + } + + certs_check = response.body.values[_].skip_cert_verification + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "Unauthorized to check organisation configuration due to Bad Credentials." + error := "401 Unauthorized." + sugg := "Kindly check the access token. It must have enough permissions to get organisation configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "Mentioned Organisation not found while trying to fetch org configuration." + sugg := "Kindly check if the organisation provided is correct and the access token has rights to read organisation configuration." + error := "Organisation name is incorrect." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "Bitbucket is not reachable." + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + codes = [401, 404, 500, 200, 302] + not response.status_code in codes + msg := "Unable to fetch organisation configuration." + error := sprintf("Error %v:%v receieved from Bitbucket upon trying to fetch organisation configuration.", [response.status_code, response.body.message]) + sugg := "Kindly check Bitbucket API is reachable and the provided access token has required permissions." 
+ } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + certs_check = false + msg := sprintf("Webhook SSL Check failed: SSL/TLS not enabled for %v/%v repository.", [input.metadata.owner,input.metadata.repository]) + error := "" + sugg := sprintf("Adhere to the company policy by enabling the webhook ssl/tls for %v/%v repository.", [input.metadata.owner,input.metadata.repository]) + }`, + + 307: ` + package opsmx + + severity = "High" + default findings_count = 0 + complete_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=analysis_", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_codescan_snyk.json&scanOperation=snykcodescan"] ) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=analysis_", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_codescan_snyk.json&scanOperation=snykcodescan"] ) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + + findings_count = count([response.body.snykAnalysis[idx] | response.body.snykAnalysis[idx].severity == severity]) + findings = [response.body.snykAnalysis[idx] | response.body.snykAnalysis[idx].severity == severity] + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + findings_count > 0 + some i + title := sprintf("Snyk Code Scan: %v ",[findings[i].ruleName]) + msg := sprintf("%v: %v", [findings[i].ruleName, findings[i].ruleMessage]) + sugg := "Please examine the high severity findings in the Snyk analysis data, available through the View Findings button and proactively review your code for common issues and apply best coding practices during development to prevent such alerts from arising." 
+ error := "" + }`, + + 308: ` + package opsmx + + severity = "Medium" + default findings_count = 0 + complete_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=analysis_", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_codescan_snyk.json&scanOperation=snykcodescan"] ) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=analysis_", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_codescan_snyk.json&scanOperation=snykcodescan"] ) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + + findings_count = count([response.body.snykAnalysis[idx] | response.body.snykAnalysis[idx].severity == severity]) + findings = [response.body.snykAnalysis[idx] | response.body.snykAnalysis[idx].severity == severity] + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + findings_count > 0 + some i + title := sprintf("Snyk Code Scan: %v ",[findings[i].ruleName]) + msg := sprintf("%v: %v", [findings[i].ruleName, findings[i].ruleMessage]) + sugg := "Please examine the medium severity findings in the Snyk analysis data, available through the View Findings button and proactively review your code for common issues and apply best coding practices during development to prevent such alerts from arising." 
+ error := "" + }`, + + 309: ` + package opsmx + + severity = "Low" + default findings_count = 0 + complete_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=analysis_", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_codescan_snyk.json&scanOperation=snykcodescan"] ) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=analysis_", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_codescan_snyk.json&scanOperation=snykcodescan"] ) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + + findings_count = count([response.body.snykAnalysis[idx] | response.body.snykAnalysis[idx].severity == severity]) + findings = [response.body.snykAnalysis[idx] | response.body.snykAnalysis[idx].severity == severity] + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + findings_count > 0 + some i + title := sprintf("Snyk Code Scan: %v ",[findings[i].ruleName]) + msg := sprintf("%v: %v", [findings[i].ruleName, findings[i].ruleMessage]) + sugg := "Please examine the low severity findings in the Snyk analysis data, available through the View Findings button and proactively review your code for common issues and apply best coding practices during development to prevent such alerts from arising." 
+ error := "" + }`, + + 310: ` + package opsmx + + default license_count = 0 + complete_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_codeLicenseScanResult.json&scanOperation=codelicensescan"] ) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_codeLicenseScanResult.json&scanOperation=codelicensescan"] ) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + results := response.body.Results + + licenses := [lic | + results[_].Class == "license-file" + result := results[_] + lic := result.Licenses[_] + lic.Name != "" + ] + + license_count = count(licenses) + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + license_count == 0 + title := "Code License Scan: No license found." + msg := sprintf("Code License Scan: No license found to be associated with repository %v:%v.",[input.metadata.owner, input.metadata.repository]) + sugg := "Please associate appropriate license with code repository to be able to evaluate quality of license." 
+ error := "" + }`, + + 311: ` + package opsmx + + default license_count = 0 + default low_severity_licenses = [] + complete_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_codeLicenseScanResult.json&scanOperation=codelicensescan"] ) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_codeLicenseScanResult.json&scanOperation=codelicensescan"] ) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + results := response.body.Results + + licenses := [lic | + results[_].Class == "license-file" + result := results[_] + lic := result.Licenses[_] + lic.Name != "" + ] + + license_count = count(licenses) + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + license_count == 0 + title := "Code License Scan: No license found." + msg := sprintf("Code License Scan: No license found to be associated with repository %v:%v.",[input.metadata.owner, input.metadata.repository]) + sugg := "Please associate appropriate license with code repository to be able to evaluate quality of license." + error := sprintf("No licenses found to be associated with repository %v:%v.", [input.metadata1.owner, input.metadata1.repository]) + } + + low_severity_licenses = [licenses[idx].Name | licenses[idx].Severity == "LOW"] + license_names = concat(",", low_severity_licenses) + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + count(low_severity_licenses) > 0 + title := "Code License Scan: Low Severity Licenses Found." 
+ msg := sprintf("Code License Scan: Low Severity License: %v found to be associated with repository %v:%v.",[license_names, input.metadata.owner, input.metadata.repository]) + sugg := "Please associate appropriate license with code repository." + error := "" + }`, + + 312: ` + package opsmx + import future.keywords.in + + default license_count = 0 + default medium_severity_licenses = [] + complete_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_codeLicenseScanResult.json&scanOperation=codelicensescan"] ) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_codeLicenseScanResult.json&scanOperation=codelicensescan"] ) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + results := input.Results + + licenses := [lic | + results[_].Class == "license-file" + result := results[_] + lic := result.Licenses[_] + lic.Name != "" + ] + + license_count = count(licenses) + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + license_count == 0 + title := "Code License Scan: No license found." + msg := sprintf("Code License Scan: No license found to be associated with repository %v:%v.",[input.metadata.owner, input.metadata.repository]) + sugg := "Please associate appropriate license with code repository to be able to evaluate quality of license." 
+ error := sprintf("No licenses found to be associated with repository %v:%v.", [input.metadata1.owner, input.metadata1.repository]) + } + + medium_severity_licenses = [licenses[idx].Name | licenses[idx].Severity in ["MEDIUM", "UNKNOWN"]] + license_names = concat(",", medium_severity_licenses) + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + count(medium_severity_licenses) > 0 + title := "Code License Scan: Medium Severity Licenses Found." + msg := sprintf("Code License Scan: Medium Severity License: %v found to be associated with repository %v:%v.",[license_names, input.metadata.owner, input.metadata.repository]) + sugg := "Please associate appropriate license with code repository." + error := "" + }`, + + 313: ` + package opsmx + + default license_count = 0 + default high_severity_licenses = [] + complete_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_codeLicenseScanResult.json&scanOperation=codelicensescan"] ) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_codeLicenseScanResult.json&scanOperation=codelicensescan"] ) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + results := response.body.Results + + licenses := [lic | + results[_].Class == "license-file" + result := results[_] + lic := result.Licenses[_] + lic.Name != "" + ] + + license_count = count(licenses) + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + license_count == 0 + title := "Code License Scan: No license found." 
+ msg := sprintf("Code License Scan: No license found to be associated with repository %v:%v.",[input.metadata.owner, input.metadata.repository]) + sugg := "Please associate appropriate license with code repository to be able to evaluate quality of license." + error := sprintf("No licenses found to be associated with repository %v:%v.", [input.metadata1.owner, input.metadata1.repository]) + } + + high_severity_licenses = [licenses[idx].Name | licenses[idx].Severity == "HIGH"] + license_names = concat(",", high_severity_licenses) + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + count(high_severity_licenses) > 0 + title := "Code License Scan: High Severity Licenses Found." + msg := sprintf("Code License Scan: High Severity License: %v found to be associated with repository %v:%v.",[license_names, input.metadata.owner, input.metadata.repository]) + sugg := "Please associate appropriate license with code repository." + error := "" + }`, + + 314: ` + package opsmx + + default license_count = 0 + default critical_severity_licenses = [] + complete_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_codeLicenseScanResult.json&scanOperation=codelicensescan"] ) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_codeLicenseScanResult.json&scanOperation=codelicensescan"] ) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + results := response.body.Results + + licenses := [lic | + results[_].Class == "license-file" + result := results[_] + lic := result.Licenses[_] + lic.Name != "" + ] + + license_count = count(licenses) + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + license_count == 0 + 
title := "Code License Scan: No license found." + msg := sprintf("Code License Scan: No license found to be associated with repository %v:%v.",[input.metadata.owner, input.metadata.repository]) + sugg := "Please associate appropriate license with code repository to be able to evaluate quality of license." + error := sprintf("No licenses found to be associated with repository %v:%v.", [input.metadata1.owner, input.metadata1.repository]) + } + + critical_severity_licenses = [licenses[idx].Name | licenses[idx].Severity == "CRITICAL"] + license_names = concat(",", critical_severity_licenses) + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + count(critical_severity_licenses) > 0 + title := "Code License Scan: Critical Severity Licenses Found." + msg := sprintf("Code License Scan: Critical Severity License: %v found to be associated with repository %v:%v.",[license_names, input.metadata.owner, input.metadata.repository]) + sugg := "Please associate appropriate license with code repository." + error := "" + }`, + + 315: ` + package opsmx + + default license_count = 0 + + image_sha = replace(input.metadata.image_sha, ":", "-") + complete_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=", image_sha, "-imageLicenseScanResult.json&scanOperation=imagelicensescan"]) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=", image_sha, "-imageLicenseScanResult.json&scanOperation=imagelicensescan"] ) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + results := response.body.Results + + licenses := [lic | + results[_].Class == "license-file" + result := results[_] + lic := result.Licenses[_] + lic.Name != "" + ] + + license_count = count(licenses) + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + license_count == 0 + title := "Artifact License Scan: No license found." 
+ msg := sprintf("Artifact License Scan: No license found to be associated with artifact %v:%v.",[input.metadata.image, input.metadata.image_tag]) + sugg := "Please associate appropriate license with artifact to be able to evaluate quality of license." + error := "" + }`, + + 316: ` + package opsmx + + default license_count = 0 + default low_severity_licenses = [] + + image_sha = replace(input.metadata.image_sha, ":", "-") + complete_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=", image_sha, "-imageLicenseScanResult.json&scanOperation=imagelicensescan"]) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=", image_sha, "-imageLicenseScanResult.json&scanOperation=imagelicensescan"] ) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + results := response.body.Results + + licenses := [lic | + results[_].Class == "license-file" + result := results[_] + lic := result.Licenses[_] + lic.Name != "" + ] + + license_count = count(licenses) + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + license_count == 0 + title := "Artifact License Scan: No license found." + msg := sprintf("Artifact License Scan: No license found to be associated with artifact %v:%v.",[input.metadata.image, input.metadata.image_tag]) + sugg := "Please associate appropriate license with artifact to be able to evaluate quality of license." 
+ error := sprintf("No licenses found to be associated with artifact %v:%v.", [input.metadata.image, input.metadata.image_tag]) + } + + low_severity_licenses = [licenses[idx] | licenses[idx].Severity == "LOW"] + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + count(low_severity_licenses) > 0 + some i + title := sprintf("Artifact License Scan: Package: %v/ License: %v/ Category: %v", [low_severity_licenses[i].PkgName, low_severity_licenses[i].Name, low_severity_licenses[i].Category]) + msg := sprintf("Artifact License Scan: Critical Severity License: %v found to be associated with %v in artifact %v:%v.",[low_severity_licenses[i].Name, low_severity_licenses[i].PkgName, input.metadata.image, input.metadata.image_tag]) + sugg := "Please associate appropriate license with artifact and associated dependencies or upgrade the dependencies to their licensed arternatives." + error := "" + }`, + + 317: ` + package opsmx + import future.keywords.in + + default license_count = 0 + default medium_severity_licenses = [] + + image_sha = replace(input.metadata.image_sha, ":", "-") + complete_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=", image_sha, "-imageLicenseScanResult.json&scanOperation=imagelicensescan"]) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=", image_sha, "-imageLicenseScanResult.json&scanOperation=imagelicensescan"] ) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + results := response.body.Results + + licenses := [lic | + results[_].Class == "license-file" + result := results[_] + lic := result.Licenses[_] + lic.Name != "" + ] + + license_count = count(licenses) + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + license_count == 0 + title := "Artifact License Scan: No license found." 
+ msg := sprintf("Artifact License Scan: No license found to be associated with artifact %v:%v.",[input.metadata.image, input.metadata.image_tag]) + sugg := "Please associate appropriate license with artifact to be able to evaluate quality of license." + error := sprintf("No licenses found to be associated with artifact %v:%v.", [input.metadata.image, input.metadata.image_tag]) + } + + medium_severity_licenses = [licenses[idx] | licenses[idx].Severity in ["MEDIUM", "UNKNOWN"]] + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + count(medium_severity_licenses) > 0 + some i + title := sprintf("Artifact License Scan: Package: %v/ License: %v/ Category: %v", [medium_severity_licenses[i].PkgName, medium_severity_licenses[i].Name, medium_severity_licenses[i].Category]) + msg := sprintf("Artifact License Scan: Critical Severity License: %v found to be associated with %v in artifact %v:%v.",[medium_severity_licenses[i].Name, medium_severity_licenses[i].PkgName, input.metadata.image, input.metadata.image_tag]) + sugg := "Please associate appropriate license with artifact and associated dependencies or upgrade the dependencies to their licensed arternatives." 
+ error := "" + }`, + + 318: ` + package opsmx + + default license_count = 0 + default high_severity_licenses = [] + + image_sha = replace(input.metadata.image_sha, ":", "-") + complete_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=", image_sha, "-imageLicenseScanResult.json&scanOperation=imagelicensescan"]) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=", image_sha, "-imageLicenseScanResult.json&scanOperation=imagelicensescan"] ) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + results := response.body.Results + + licenses := [lic | + results[_].Class == "license-file" + result := results[_] + lic := result.Licenses[_] + lic.Name != "" + ] + + license_count = count(licenses) + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + license_count == 0 + title := "Artifact License Scan: No license found." + msg := sprintf("Artifact License Scan: No license found to be associated with artifact %v:%v.",[input.metadata.image, input.metadata.image_tag]) + sugg := "Please associate appropriate license with artifact to be able to evaluate quality of license." 
+ error := sprintf("No licenses found to be associated with artifact %v:%v.", [input.metadata.image, input.metadata.image_tag]) + } + + high_severity_licenses = [licenses[idx] | licenses[idx].Severity == "HIGH"] + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + count(high_severity_licenses) > 0 + some i + title := sprintf("Artifact License Scan: Package: %v/ License: %v/ Category: %v", [high_severity_licenses[i].PkgName, high_severity_licenses[i].Name, high_severity_licenses[i].Category]) + msg := sprintf("Artifact License Scan: Critical Severity License: %v found to be associated with %v in artifact %v:%v.",[high_severity_licenses[i].Name, high_severity_licenses[i].PkgName, input.metadata.image, input.metadata.image_tag]) + sugg := "Please associate appropriate license with artifact and associated dependencies or upgrade the dependencies to their licensed arternatives." + error := "" + }`, + + 319: ` + package opsmx + + default license_count = 0 + default critical_severity_licenses = [] + + image_sha = replace(input.metadata.image_sha, ":", "-") + complete_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=", image_sha, "-imageLicenseScanResult.json&scanOperation=imagelicensescan"]) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=", image_sha, "-imageLicenseScanResult.json&scanOperation=imagelicensescan"] ) + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + results := response.body.Results + + licenses := [lic | + results[_].Class == "license-file" + result := results[_] + lic := result.Licenses[_] + lic.Name != "" + ] + + license_count = count(licenses) + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + license_count == 0 + title := "Artifact License Scan: No license found." 
+ msg := sprintf("Artifact License Scan: No license found to be associated with artifact %v:%v.",[input.metadata.image, input.metadata.image_tag]) + sugg := "Please associate appropriate license with artifact to be able to evaluate quality of license." + error := sprintf("No licenses found to be associated with artifact %v:%v.", [input.metadata.image, input.metadata.image_tag]) + } + + critical_severity_licenses = [licenses[idx] | licenses[idx].Severity == "CRITICAL"] + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + count(critical_severity_licenses) > 0 + some i + title := sprintf("Artifact License Scan: Package: %v/ License: %v/ Category: %v", [critical_severity_licenses[i].PkgName, critical_severity_licenses[i].Name, critical_severity_licenses[i].Category]) + msg := sprintf("Artifact License Scan: Critical Severity License: %v found to be associated with %v in artifact %v:%v.",[critical_severity_licenses[i].Name, critical_severity_licenses[i].PkgName, input.metadata.image, input.metadata.image_tag]) + sugg := "Please associate appropriate license with artifact and associated dependencies or upgrade the dependencies to their licensed arternatives." 
+ error := "" + }`, + + 320: ` + package opsmx + + default url_count = 0 + default malicious_urls = [] + default malicious_urls_count = 0 + + complete_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_virustotal_url_scan.json&scanOperation=virustotalscan"]) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_virustotal_url_scan.json&scanOperation=virustotalscan"] ) + + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + results := response.body.summaryResult + repo_name := response.body.repoName + branch := response.body.branch + + malicious_urls := [results[idx] | results[idx].malicious > 0] + + malicious_urls_count = count(malicious_urls) + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + malicious_urls_count > 0 + some i + title := sprintf("Suspicious URL %v found in Repository: %v Branch: %v.", [malicious_urls[i].url, repo_name, branch]) + msg := sprintf("Suspicious URL %v found in Repository: %v Branch: %v. \nSummary of Scan Results: \nHarmless: %v\nMalicious: %v\nSuspicious: %v\nUndetected: %v\nTimeout: %v",[malicious_urls[i].url, repo_name, branch, malicious_urls[i].harmless, malicious_urls[i].malicious, malicious_urls[i].malicious, malicious_urls[i].undetected, malicious_urls[i].timeout]) + sugg := "Suggest securing the webhook endpoints from malicious activities by enabling security measures and remove any unwanted URL references from source code repository and configurations." 
+ error := "" + }`, + + 321: ` + package opsmx + + default url_count = 0 + default suspicious_urls = [] + default suspicious_urls_count = 0 + + complete_url = concat("",[input.metadata.toolchain_addr,"api/v1/scanResult?fileName=", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_virustotal_url_scan.json&scanOperation=virustotalscan"]) + download_url = concat("",["tool-chain/api/v1/scanResult?fileName=", input.metadata.owner, "_", input.metadata.repository, "_", input.metadata.build_id, "_virustotal_url_scan.json&scanOperation=virustotalscan"] ) + + + request = { + "method": "GET", + "url": complete_url + } + + response = http.send(request) + results := response.body.summaryResult + repo_name := response.body.repoName + branch := response.body.branch + + suspicious_urls := [results[idx] | results[idx].suspicious > 0] + + suspicious_urls_count = count(suspicious_urls) + + deny[{"alertTitle": title, "alertMsg": msg, "suggestion": sugg, "error": error, "fileApi": download_url}]{ + suspicious_urls_count > 0 + some i + title := sprintf("Suspicious URL %v found in Repository: %v Branch: %v.", [suspicious_urls[i].url, repo_name, branch]) + msg := sprintf("Suspicious URL %v found in Repository: %v Branch: %v. \nSummary of Scan Results: \nHarmless: %v\nMalicious: %v\nSuspicious: %v\nUndetected: %v\nTimeout: %v",[suspicious_urls[i].url, repo_name, branch, suspicious_urls[i].harmless, suspicious_urls[i].malicious, suspicious_urls[i].suspicious, suspicious_urls[i].undetected, suspicious_urls[i].timeout]) + sugg := "Suggest securing the webhook endpoints from suspicious activities by enabling security measures and remove any unwanted URL references from source code repository and configurations." 
+ error := "" + }`, + + 322: ` + package opsmx + + import future.keywords.in + + # Define sensitive keywords to look for in the workflow + sensitive_keywords = ["API_KEY", "SECRET_KEY", "PASSWORD", "TOKEN"] + + # Helper function to check if a string contains any sensitive keyword + contains_sensitive_keyword(value) = true { + some keyword in sensitive_keywords + contains(value, keyword) + } + + contains_sensitive_keyword(_) = false + + # Construct the request URL to list all workflows + list_workflows_url = sprintf("%s/repos/%s/%s/actions/workflows", [ + input.metadata.ssd_secret.github.rest_api_url, + input.metadata.owner, + input.metadata.repository + ]) + + token = input.metadata.ssd_secret.github.token + list_workflows_request = { + "method": "GET", + "url": list_workflows_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + list_workflows_response = http.send(list_workflows_request) + + # Find the workflow by name + workflow_file_path = workflow_path { + some workflow in list_workflows_response.body.workflows + workflow.name == input.metadata.ssd_secret.github.workflowName + workflow_path := workflow.path + } + + # Construct the request URL to fetch the workflow content + request_url = sprintf("%s/repos/%s/%s/contents/%s", [ + input.metadata.ssd_secret.github.rest_api_url, + input.metadata.owner, + input.metadata.repository, + workflow_file_path + ]) + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + # Check if the response status code is not 200 + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + response.status_code != 200 + msg := "Failed to fetch the workflow." 
+ error := sprintf("Error %v: %v received from GitHub when trying to fetch the workflow.", [response.status_code, response.body.message]) + sugg := "Ensure the provided GitHub token has the required permissions and the workflow name is correct." + } + + # Check if any step contains hardcoded sensitive data + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + response.status_code == 200 + + # Decode the workflow content from base64 and parse as YAML + workflow_content := base64.decode(response.body.content) + workflow := yaml.unmarshal(workflow_content) + job := workflow.jobs[_] + step := job.steps[_] + + # Check the run field for hardcoded sensitive data + step.run + contains_sensitive_keyword(step.run) + + msg := sprintf("Hardcoded sensitive data found in step %s of job %s in workflow %s.", [step.name, job.name, input.metadata.ssd_secret.github.workflowName]) + sugg := "Reference sensitive data using GitHub Secrets instead of hardcoding them in the workflow." + error := "" + } + + # Check if any with field contains hardcoded sensitive data + #deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + # response.status_code == 200 + + # Decode the workflow content from base64 and parse as YAML + # workflow_content := base64.decode(response.body.content) + # workflow := yaml.unmarshal(workflow_content) + # job := workflow.jobs[_] + # step := job.steps[_] + + # Check each with field for hardcoded sensitive data + # with_fields := {key: value | some key; value := step.with[key]} + # some key in keys(with_fields) + # contains_sensitive_keyword(with_fields[key]) + + # msg := sprintf("Hardcoded sensitive data found in with field of step %s of job %s in workflow %s.", [step.name, job.name, input.metadata.ssd_secret.github.workflowName]) + # sugg := "Reference sensitive data using GitHub Secrets instead of hardcoding them in the workflow." 
+ # error := "" + #} + `, + + 323: ` + package opsmx + import future.keywords.in + + # Define a list of approved actions and their versions + approved_actions = { + "actions/checkout": "v2", + "actions/setup-node": "v2", + "docker/build-push-action": "v2", + "docker/login-action": "v1" + # Add more approved actions and their versions here + } + + # Construct the request URL to list all workflows + list_workflows_url = sprintf("%s/repos/%s/%s/actions/workflows", [ + input.metadata.ssd_secret.github.rest_api_url, + input.metadata.owner, + input.metadata.repository + ]) + + token = input.metadata.ssd_secret.github.token + list_workflows_request = { + "method": "GET", + "url": list_workflows_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + list_workflows_response = http.send(list_workflows_request) + + # Find the workflow by name + workflow_file_path = workflow_path { + some workflow in list_workflows_response.body.workflows + workflow.name == input.metadata.ssd_secret.github.workflowName + workflow_path := workflow.path + } + + # Construct the request URL to fetch the workflow content + request_url = sprintf("%s/repos/%s/%s/contents/%s", [ + input.metadata.ssd_secret.github.rest_api_url, + input.metadata.owner, + input.metadata.repository, + workflow_file_path + ]) + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + # Check if the response status code is not 200 + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + response.status_code != 200 + msg := "Failed to fetch the workflow." + error := sprintf("Error %v: %v received from GitHub when trying to fetch the workflow.", [response.status_code, response.body.message]) + sugg := "Ensure the provided GitHub token has the required permissions and the workflow name is correct." 
+ } + + # Check if the actions used in the workflow are approved + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + response.status_code == 200 + + # Decode the workflow content from base64 and parse as YAML + workflow_content := base64.decode(response.body.content) + workflow := yaml.unmarshal(workflow_content) + job := workflow.jobs[_] + step := job.steps[_] + + # Check if the step uses an action + step.uses + split_step := split(step.uses, "@") + action_name := split_step[0] + action_version := split_step[1] + + # Ensure the action is in the approved list + not approved_actions[action_name] == action_version + + msg := sprintf("Action %v@%v is not from an approved source or version.", [action_name, action_version]) + sugg := "Update the action to an approved version listed in the policy, or contact the repository owner to approve the current version." + error := "" + }`, + + 324: ` + package opsmx + import future.keywords.in + + # Define a list of trusted sources for dependencies + trusted_sources = [ + "https://registry.npmjs.org/", + "https://pypi.org/simple/", + "https://rubygems.org/" + # Add more trusted sources here + ] + + # Construct the request URL to list all workflows + list_workflows_url = sprintf("%s/repos/%s/%s/actions/workflows", [ + input.metadata.ssd_secret.github.rest_api_url, + input.metadata.owner, + input.metadata.repository + ]) + + token = input.metadata.ssd_secret.github.token + list_workflows_request = { + "method": "GET", + "url": list_workflows_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + list_workflows_response = http.send(list_workflows_request) + + # Find the workflow by name + workflow_file_path = workflow_path { + some workflow in list_workflows_response.body.workflows + workflow.name == input.metadata.ssd_secret.github.workflowName + workflow_path := workflow.path + } + + # Construct the request URL to fetch the workflow content + request_url = 
sprintf("%s/repos/%s/%s/contents/%s", [ + input.metadata.ssd_secret.github.rest_api_url, + input.metadata.owner, + input.metadata.repository, + workflow_file_path + ]) + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + # Check if the response status code is not 200 + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + response.status_code != 200 + msg := "Failed to fetch the workflow." + error := sprintf("Error %v: %v received from GitHub when trying to fetch the workflow.", [response.status_code, response.body.message]) + sugg := "Ensure the provided GitHub token has the required permissions and the workflow name is correct." + } + + # Check if the dependencies are fetched from trusted sources + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + response.status_code == 200 + + # Decode the workflow content from base64 and parse as YAML + workflow_content := base64.decode(response.body.content) + workflow := yaml.unmarshal(workflow_content) + job := workflow.jobs[_] + step := job.steps[_] + + # Check if the step installs dependencies + step.run + some dependency in split(step.run, "\n") + contains(dependency, "install") + + # Verify the source of the dependency + not is_trusted_source(dependency) + + msg := sprintf("Dependency fetched from untrusted source in step %s of job %s in workflow %s.", [step.name, job.name, input.metadata.ssd_secret.github.workflowName]) + sugg := "Ensure all dependencies are fetched from trusted sources such as npm, PyPI, or RubyGems." 
+ error := "" + } + + # Helper function to check if a dependency is from a trusted source + is_trusted_source(dependency) { + some trusted_source in trusted_sources + contains(dependency, trusted_source) + }`, + + 325: ` + package opsmx + + import future.keywords.in + + # Define allowed branches and events + allowed_branches = ["main", "master", "develop"] + allowed_events = {"push", "pull_request"} + + # Construct the request URL to list all workflows + list_workflows_url = sprintf("%s/repos/%s/%s/actions/workflows", [ + input.metadata.ssd_secret.github.rest_api_url, + input.metadata.owner, + input.metadata.repository + ]) + + token = input.metadata.ssd_secret.github.token + list_workflows_request = { + "method": "GET", + "url": list_workflows_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + list_workflows_response = http.send(list_workflows_request) + + # Find the workflow by name + workflow_file_path = workflow_path { + some workflow in list_workflows_response.body.workflows + workflow.name == input.metadata.ssd_secret.github.workflowName + workflow_path := workflow.path + } + + # Construct the request URL to fetch the workflow content + request_url = sprintf("%s/repos/%s/%s/contents/%s", [ + input.metadata.ssd_secret.github.rest_api_url, + input.metadata.owner, + input.metadata.repository, + workflow_file_path + ]) + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + # Check if the response status code is not 200 + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + response.status_code != 200 + msg := "Failed to fetch the workflow." + error := sprintf("Error %v: %v received from GitHub when trying to fetch the workflow.", [response.status_code, response.body.message]) + sugg := "Ensure the provided GitHub token has the required permissions and the workflow name is correct." 
+ } + + # Check if workflows are triggered on allowed branches and events + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + response.status_code == 200 + + # Decode the workflow content from base64 and parse as YAML + workflow_content := base64.decode(response.body.content) + workflow := yaml.unmarshal(workflow_content) + on := workflow.on + + # Check for disallowed branches in push triggers + some branch in on.push.branches + not branch in allowed_branches + msg := sprintf("Workflow triggered on disallowed branch %v in push trigger in workflow %s.", [branch, input.metadata.ssd_secret.github.workflowName]) + sugg := "Ensure that the workflow is only triggered on allowed branches: main, master, or develop." + error := "" + trigger := "branch" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + response.status_code == 200 + + # Decode the workflow content from base64 and parse as YAML + workflow_content := base64.decode(response.body.content) + workflow := yaml.unmarshal(workflow_content) + on := workflow.on + + # Check for disallowed branches in pull_request triggers + some branch in on.pull_request.branches + not branch in allowed_branches + msg := sprintf("Workflow triggered on disallowed branch %v in pull_request trigger in workflow %s.", [branch, input.metadata.ssd_secret.github.workflowName]) + sugg := "Ensure that the workflow is only triggered on allowed branches: main, master, or develop." 
+ error := "" + trigger := "branch" + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + response.status_code == 200 + + # Decode the workflow content from base64 and parse as YAML + workflow_content := base64.decode(response.body.content) + workflow := yaml.unmarshal(workflow_content) + on := workflow.on + + # Check for disallowed events + some event in object.keys(on) + not event in allowed_events + msg := sprintf("Workflow triggered on disallowed event %v in workflow %s.", [event, input.metadata.ssd_secret.github.workflowName]) + sugg := "Ensure that the workflow is only triggered on allowed events: push or pull_request." + error := "" + trigger := "event" + }`, + 326: ` + package opsmx + + import future.keywords.in + + # Define allowed protocols + allowed_protocols = ["https://", "ssh://"] + + # Helper function to check if a URL uses a secure protocol + uses_secure_protocol(url) = true { + some protocol in allowed_protocols + startswith(url, protocol) + } + + uses_secure_protocol(_) = false + + # Construct the request URL to list all workflows + list_workflows_url = sprintf("%s/repos/%s/%s/actions/workflows", [ + input.metadata.ssd_secret.github.rest_api_url, + input.metadata.owner, + input.metadata.repository + ]) + + token = input.metadata.ssd_secret.github.token + list_workflows_request = { + "method": "GET", + "url": list_workflows_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + list_workflows_response = http.send(list_workflows_request) + + # Find the workflow by name + workflow_file_path = workflow_path { + some workflow in list_workflows_response.body.workflows + workflow.name == input.metadata.ssd_secret.github.workflowName + workflow_path := workflow.path + } + + # Construct the request URL to fetch the workflow content + request_url = sprintf("%s/repos/%s/%s/contents/%s", [ + input.metadata.ssd_secret.github.rest_api_url, + input.metadata.owner, + input.metadata.repository, + workflow_file_path + 
]) + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + # Check if the response status code is not 200 + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + response.status_code != 200 + msg := "Failed to fetch the workflow." + error := sprintf("Error %v: %v received from GitHub when trying to fetch the workflow.", [response.status_code, response.body.message]) + sugg := "Ensure the provided GitHub token has the required permissions and the workflow name is correct." + } + + # Check if all network communications use secure protocols + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + response.status_code == 200 + + # Decode the workflow content from base64 and parse as YAML + workflow_content := base64.decode(response.body.content) + workflow := yaml.unmarshal(workflow_content) + job := workflow.jobs[_] + step := job.steps[_] + + # Check the run field for insecure protocols + step.run + some line in split(step.run, "\n") + url := find_network_calls(line) + not uses_secure_protocol(url) + + msg := sprintf("Insecure protocol used in step %s of job %s in workflow %s. URL: %v", [step.name, job.name, input.metadata.ssd_secret.github.workflowName, url]) + sugg := "Use secure protocols (https or ssh) for all network communications." 
+ error := "" + } + + # Helper function to extract http URLs from a line of text + find_http_url(line) = url { + start := indexof(line, "http://") + start != -1 + rest := substring(line, start, -1) + end := indexof(rest, " ") + end == -1 + url := substring(rest, 0, count(rest)) + } else { + start := indexof(line, "http://") + start != -1 + rest := substring(line, start, -1) + end := indexof(rest, " ") + end != -1 + url := substring(rest, 0, end) + } + + # Helper function to extract ftp URLs from a line of text + find_ftp_url(line) = url { + start := indexof(line, "ftp://") + start != -1 + rest := substring(line, start, -1) + end := indexof(rest, " ") + end == -1 + url := substring(rest, 0, count(rest)) + } else { + start := indexof(line, "ftp://") + start != -1 + rest := substring(line, start, -1) + end := indexof(rest, " ") + end != -1 + url := substring(rest, 0, end) + } + + # Combined helper function to extract insecure URLs from a line of text + find_network_calls(line) = url { + url := find_http_url(line) + url != "" + } else { + url := find_ftp_url(line) + url != "" + }`, + + 327: ` + package opsmx + + import future.keywords.in + + # Construct the request URL to list all workflows + list_workflows_url = sprintf("%s/repos/%s/%s/actions/workflows", [ + input.metadata.ssd_secret.github.rest_api_url, + input.metadata.owner, + input.metadata.repository + ]) + + token = input.metadata.ssd_secret.github.token + list_workflows_request = { + "method": "GET", + "url": list_workflows_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + list_workflows_response = http.send(list_workflows_request) + + # Find the workflow by name + workflow_file_path = workflow_path { + some workflow in list_workflows_response.body.workflows + workflow.name == input.metadata.ssd_secret.github.workflowName + workflow_path := workflow.path + } + + # Construct the request URL to fetch the workflow content + request_url = sprintf("%s/repos/%s/%s/contents/%s", [ + 
input.metadata.ssd_secret.github.rest_api_url, + input.metadata.owner, + input.metadata.repository, + workflow_file_path + ]) + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]), + }, + } + + response = http.send(request) + + # Check if the response status code is not 200 + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + response.status_code != 200 + msg := "Failed to fetch the workflow." + error := sprintf("Error %v: %v received from GitHub when trying to fetch the workflow.", [response.status_code, response.body.message]) + sugg := "Ensure the provided GitHub token has the required permissions and the workflow name is correct." + } + + # Check if each job has a timeout configured + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + response.status_code == 200 + + # Decode the workflow content from base64 and parse as YAML + workflow_content := base64.decode(response.body.content) + workflow := yaml.unmarshal(workflow_content) + jobs := workflow.jobs + + some job_name in jobs + job := jobs[job_name] + not job["timeout-minutes"] + + msg := sprintf("Job %s in workflow %s does not have a timeout configured.", [job_name, input.metadata.ssd_secret.github.workflowName]) + sugg := "Configure a timeout for the job in the workflow file." 
+ error := "" + } + + # Check if each step has a timeout configured (if applicable) + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}] { + response.status_code == 200 + + # Decode the workflow content from base64 and parse as YAML + workflow_content := base64.decode(response.body.content) + workflow := yaml.unmarshal(workflow_content) + jobs := workflow.jobs + + some job_name in jobs + job := jobs[job_name] + steps := job.steps + + some step_name in steps + step := steps[step_name] + not step["timeout-minutes"] + + msg := sprintf("Step %s in job %s of workflow %s does not have a timeout configured.", [step_name, job_name, input.metadata.ssd_secret.github.workflowName]) + sugg := "Configure a timeout for the step in the workflow file." + error := "" + }`, + + 328: ` + package opsmx + + default allow = false + + request_components = [input.metadata.ssd_secret.github.rest_api_url, "repos", input.metadata.owner, input.metadata.repository,"actions/permissions/workflow"] + + request_url = concat("/", request_components) + + token = input.metadata.ssd_secret.github.token + + request = { + "method": "GET", + "url": request_url, + "headers": { + "Authorization": sprintf("Bearer %v", [token]) + } + } + + response = http.send(request) + + allow { + response.status_code = 200 + } + + deny[{"alertMsg":msg, "suggestions": sugg, "error": error}]{ + response.status_code == 401 + msg := "Unauthorized to check repository configuration due to Bad Credentials." + error := "401 Unauthorized." + sugg := "Kindly check the access token. It must have enough permissions to get repository configurations." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 404 + msg := "Repository not found while trying to fetch Repository Configuration." + sugg := "Kindly check if the repository provided is correct and the access token has rights to read repository configuration." + error := "Repo name or Organisation is incorrect." 
+ } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.status_code == 500 + msg := "Internal Server Error." + sugg := "" + error := "GitHub is not reachable." + } + + deny[{"alertMsg": msg, "suggestion": sugg, "error": error}]{ + response.body.default_workflow_permissions == "write" + msg := sprintf("Github actions workflow permissions are write permissions for %v/%v repository", [input.metadata.owner, input.metadata.repository]) + sugg := sprintf("Adhere to the company policy by the Github actions workflow permission should be read for %v/%v repository.", [input.metadata.owner, input.metadata.repository]) + error := "" + }`, +} + +var policyDefinition = []string{ + ` + { + "policyId":"1", + "orgId":"1", + "policyName":"Repository Access Control Policy", + "category":"Git Security Posture", + "stage":"source", + "description":"Code Repository should not be publicly visible or modifiable.", + "scheduled_policy":false, + "scriptId":"1", + "variables":"", + "conditionName":"Repository Access Control Policy" + } + `, + ` + { + "policyId":"2", + "orgId":"1", + "policyName":"Minimum Reviewers Policy", + "category":"Git Security Posture", + "stage":"source", + "description":"Pushed code should be reviewed by a minimum number of users:2 as defined in the policy.", + "scheduled_policy":false, + "scriptId":"2", + "variables":"", + "conditionName":"Minimum Reviewers Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"3", + "orgId":"1", + "policyName":"Branch Protection Policy", + "category":"Git Security Posture", + "stage":"source", + "description":"Repositories should have branch protection enabled requiring all code changes to be reviewed. 
This means disabling Push events and requiring Pull/Merge Requests to have code reviews.", + "scheduled_policy":false, + "scriptId":"3", + "variables":"", + "conditionName":"Branch Protection Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"4", + "orgId":"1", + "policyName":"Branch Deletion Prevention Policy", + "category":"Git Security Posture", + "stage":"source", + "description":"While the default branch can’t be deleted directly even if the setting is on, in general, it is best practice to prevent branches from being deleted by anyone with write access.", + "scheduled_policy":false, + "scriptId":"4", + "variables":"", + "conditionName":"Branch Deletion Prevention Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"5", + "orgId":"1", + "policyName":"Commit Signing Policy", + "category":"Git Security Posture", + "stage":"source", + "description":"Commit signing should be mandatory. Signing commits is needed because it is pretty easy to add anyone as the author of a commit. Git allows a committer to change the author of a commit easily. 
In the case of a signed commit, any change to the author will make the commit appear unsigned.", + "scheduled_policy":false, + "scriptId":"5", + "variables":"", + "conditionName":"Commit Signing Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"6", + "orgId":"1", + "policyName":"Repository 2FA Policy", + "category":"Git Security Posture", + "stage":"source", + "description":"Repositories should be protected based on 2FA authentication", + "scheduled_policy":false, + "scriptId":"6", + "variables":"", + "conditionName":"Repository 2FA Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"7", + "orgId":"1", + "policyName":"Low Vulnerability Prevention Policy", + "category":"Vulnerability Analysis", + "stage":"artifact", + "description":"Low Severity Vulnerability should not be found in the artifact", + "scheduled_policy":true, + "scriptId":"7", + "variables":"", + "conditionName":"severity", + "suggestion":"" + } + `, + ` + { + "policyId":"8", + "orgId":"1", + "policyName":"Critical Vulnerability Prevention Policy", + "category":"Vulnerability Analysis", + "stage":"artifact", + "description":"Critical Severity Vulnerabilities should not be found in the artifact", + "scheduled_policy":true, + "scriptId":"8", + "variables":"", + "conditionName":"severity", + "suggestion":"" + } + `, + ` + { + "policyId":"9", + "orgId":"1", + "policyName":"Medium Vulnerability Prevention Policy", + "category":"Vulnerability Analysis", + "stage":"artifact", + "description":"Medium Severity Vulnerabilities should not be found in the artifact", + "scheduled_policy":true, + "scriptId":"9", + "variables":"", + "conditionName":"severity", + "suggestion":"" + } + `, + ` + { + "policyId":"10", + "orgId":"1", + "policyName":"Build Workflow Permissions over Organization Policy", + "category":"Build Security Posture", + "stage":"build", + "description":"Build Workflow should have minimum permissions over organization configuration.", + "scheduled_policy":false, + "scriptId":"10", + 
"variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"11", + "orgId":"1", + "policyName":"Build Workflow Permissions over Repository Policy", + "category":"Build Security Posture", + "stage":"build", + "description":"Build Workflow should have minimum permissions over repository configuration", + "scheduled_policy":false, + "scriptId":"11", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"12", + "orgId":"1", + "policyName":"Identical Build and Cloud Artifact Policy", + "category":"Artifact Integrity", + "stage":"build", + "description":"Build signature in Build Environment and Cloud Environment during Deployment should be identical to confirm integrity of the artifact.", + "scheduled_policy":false, + "scriptId":"12", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"13", + "orgId":"1", + "policyName":"Open SSF Branch Protection Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"This evaluates if the project main and release branches are safeguarded with GitHub branch protection settings, enforcing review and status check requirements before merging and preventing history changes.", + "scheduled_policy":false, + "scriptId":"13", + "variables":"", + "conditionName":"Open SSF Branch Protection Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"14", + "orgId":"1", + "policyName":"Open SSF CI Tests Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"This assesses if the project enforces running tests before merging pull requests, currently applicable only to GitHub-hosted repositories, excluding other source hosting platforms.", + "scheduled_policy":false, + "scriptId":"14", + "variables":"", + "conditionName":"Open SSF CI Tests Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"15", + "orgId":"1", + "policyName":"Open SSF CII-Best Practices Policy", + "category":"OpenSSF Scorecard", + 
"stage":"source", + "description":"This evaluates if the project has achieved an OpenSSF Best Practices Badge to indicate adherence to security-focused best practices, using the Git repo URL and OpenSSF Badge API", + "scheduled_policy":false, + "scriptId":"15", + "variables":"", + "conditionName":"Open SSF CII-Best Practices Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"16", + "orgId":"1", + "policyName":"Open SSF Code Review Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"This check determines whether the project requires human code review before pull requests are merged.", + "scheduled_policy":false, + "scriptId":"16", + "variables":"", + "conditionName":"Open SSF Code Review Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"17", + "orgId":"1", + "policyName":"Open SSF Contributors Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"This check assesses if the project has recent contributors from various organizations, applicable only to GitHub-hosted repositories, without support for other source hosting platforms", + "scheduled_policy":false, + "scriptId":"17", + "variables":"", + "conditionName":"Open SSF Contributors Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"18", + "orgId":"1", + "policyName":"Open SSF Dangerous Workflow Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"This identifies risky code patterns in the project GitHub Action workflows, such as untrusted code checkouts, logging sensitive information, or using potentially unsafe inputs in scripts", + "scheduled_policy":false, + "scriptId":"18", + "variables":"", + "conditionName":"Open SSF Dangerous Workflow Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"19", + "orgId":"1", + "policyName":"Open SSF Dependency Update Tool Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"This evaluates if the project utilizes a dependency update tool like 
Dependabot, Renovate bot, Sonatype Lift, or PyUp to automate updating outdated dependencies and enhance security", + "scheduled_policy":false, + "scriptId":"19", + "variables":"", + "conditionName":"Open SSF Dependency Update Tool Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"20", + "orgId":"1", + "policyName":"Open SSF Fuzzing Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"This assesses if the project employs fuzzing, considering various criteria including repository inclusion, fuzzing tool presence, language-specific functions, and integration files.", + "scheduled_policy":false, + "scriptId":"20", + "variables":"", + "conditionName":"Open SSF Fuzzing Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"21", + "orgId":"1", + "policyName":"Open SSF License Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"This examines if the project has a published license by using hosting APIs or searching for a license file using standard naming conventions", + "scheduled_policy":false, + "scriptId":"21", + "variables":"", + "conditionName":"Open SSF License Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"22", + "orgId":"1", + "policyName":"Open SSF Maintained Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"This check evaluates project maintenance status based on commit frequency, issue activity, and archival status", + "scheduled_policy":false, + "scriptId":"22", + "variables":"", + "conditionName":"Open SSF Maintained Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"23", + "orgId":"1", + "policyName":"Open SSF Pinned Dependencies Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"This verifies if a project locks its dependencies to specific versions by their hashes, applicable only to GitHub repositories.", + "scheduled_policy":false, + "scriptId":"23", + "variables":"", + "conditionName":"Open SSF Pinned Dependencies 
Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"24", + "orgId":"1", + "policyName":"Open SSF Packaging Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"This assesses if the project is released as a package.", + "scheduled_policy":false, + "scriptId":"24", + "variables":"", + "conditionName":"Open SSF Packaging Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"25", + "orgId":"1", + "policyName":"Open SSF SAST Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"This check assesses if a GitHub-hosted project employs Static Application Security Testing.", + "scheduled_policy":false, + "scriptId":"25", + "variables":"", + "conditionName":"Open SSF SAST Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"26", + "orgId":"1", + "policyName":"Open SSF Security Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"This check tries to determine if the project has published a security policy. 
It works by looking for a file named SECURITY.md in a few well-known directories.", + "scheduled_policy":false, + "scriptId":"26", + "variables":"", + "conditionName":"Open SSF Security Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"27", + "orgId":"1", + "policyName":"Open SSF Signed Releases Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"This determines if the project cryptographically signs release artefacts.", + "scheduled_policy":false, + "scriptId":"27", + "variables":"", + "conditionName":"Open SSF Signed Releases Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"28", + "orgId":"1", + "policyName":"Open SSF Token Permissions Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"This Determines Whether the project automated workflow tokens follow the principle of least privilege.", + "scheduled_policy":false, + "scriptId":"28", + "variables":"", + "conditionName":"Open SSF Token Permissions Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"29", + "orgId":"1", + "policyName":"Open SSF Vulnerabilities Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"The Project Has Open, Unfixed Vulnerabilities in its Own codebase.", + "scheduled_policy":false, + "scriptId":"29", + "variables":"", + "conditionName":"Open SSF Vulnerabilities Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"30", + "orgId":"1", + "policyName":"Open SSF Webhooks Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"This check determines whether the webhook defined in the repository has a token configured to authenticate the origins of requests.", + "scheduled_policy":false, + "scriptId":"30", + "variables":"", + "conditionName":"Open SSF Webhooks Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"31", + "orgId":"1", + "policyName":"Open SSF Binary Artifacts Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"This 
check determines whether the project has generated executable artifacts in the source repository.", + "scheduled_policy":false, + "scriptId":"31", + "variables":"", + "conditionName":"Open SSF Binary Artifacts Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"32", + "orgId":"1", + "policyName":"Restricted Repository Access: Internal Authorization Only", + "category":"Git Security Posture", + "stage":"source", + "description":"This policy limits repository access to internal personnel only, ensuring secure and controlled information management.", + "scheduled_policy":false, + "scriptId":"32", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"33", + "orgId":"1", + "policyName":"Bot User should not be a Repository Admin", + "category":"Git Security Posture", + "stage":"source", + "description":"Bot Users should not be a Repository Administrator. Bot user is identified using some well-known patterns.", + "scheduled_policy":false, + "scriptId":"33", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"34", + "orgId":"1", + "policyName":"Bot User should not be a Org Owner", + "category":"Git Security Posture", + "stage":"source", + "description":"Bot User should not be a Org Owner. 
Bot user is identified using some well-known patterns.", + "scheduled_policy":false, + "scriptId":"34", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"35", + "orgId":"1", + "policyName":"Build Webhook Authenticated Protection Policy", + "category":"Build Security Posture", + "stage":"build", + "description":"Webhooks used in workflows should be protected/authenticated.", + "scheduled_policy":false, + "scriptId":"35", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"36", + "orgId":"1", + "policyName":"Build Webhook SSL/TLS Policy", + "category":"Build Security Posture", + "stage":"build", + "description":"Webhooks should use SSL/TLS.", + "scheduled_policy":false, + "scriptId":"36", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"37", + "orgId":"1", + "policyName":"Build Server Origin Check", + "category":"Build Security Posture", + "stage":"build", + "description":"Build Server Origin Check is a policy that ensures artifacts originate from approved build servers for secure deployments.", + "scheduled_policy":false, + "scriptId":"37", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"38", + "orgId":"1", + "policyName":"Pre-Deployment Checksum Verify", + "category":"Artifact Integrity", + "stage":"artifact", + "description":"Pre-Deployment Checksum Verify is a security policy that validates artifact integrity by comparing build-time checksums with Docker checksums, ensuring trusted and unaltered artifacts are used for deployment.", + "scheduled_policy":false, + "scriptId":"38", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"39", + "orgId":"1", + "policyName":"Cloud Artifact should match the build artifact by hash", + "category":"Artifact Integrity", + "stage":"deploy", + "description":"An image hash not matched to a build artifact may indicate a compromise of the 
cloud account. An unauthorized application may be running in your organization's cloud.", + "scheduled_policy":false, + "scriptId":"39", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"40", + "orgId":"1", + "policyName":"Repository License Inclusion Policy", + "category":"Git Security Posture", + "stage":"source", + "description":"Repositories should contain license files", + "scheduled_policy":false, + "scriptId":"40", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"41", + "orgId":"1", + "policyName":"Approved Artifact Repo Origin", + "category":"Artifact Integrity", + "stage":"artifact", + "description":"Approved Artifact Repo Origin policy validates artifacts from authorized repositories, ensuring secure deployments.", + "scheduled_policy":false, + "scriptId":"41", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"42", + "orgId":"1", + "policyName":"Open SSF Aggregate Score Policy", + "category":"OpenSSF Scorecard", + "stage":"source", + "description":"The project might have known security vulnerabilities that have not been adequately addressed", + "scheduled_policy":false, + "scriptId":"42", + "variables":"", + "conditionName":"Open SSF Aggregate Score Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"43", + "orgId":"1", + "policyName":"SonarQube Reliability Rating D Policy", + "category":"SAST", + "stage":"source", + "description":"This policy aims to promptly resolve reliability issues identified with a Grade D rating in SonarQube. 
It focuses on enhancing and sustaining code reliability to ensure the codebase operates consistently and reliably.", + "scheduled_policy":false, + "scriptId":"43", + "variables":"", + "conditionName":"SonarQube Reliability Rating D Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"44", + "orgId":"1", + "policyName":"SonarQube Reliability Rating C Policy", + "category":"SAST", + "stage":"source", + "description":"This policy aims to promptly resolve reliability issues identified with a Grade C rating in SonarQube. It focuses on enhancing and sustaining code reliability to ensure the codebase operates consistently and reliably.", + "scheduled_policy":false, + "scriptId":"44", + "variables":"", + "conditionName":"SonarQube Reliability Rating C Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"45", + "orgId":"1", + "policyName":"SonarQube Reliability Rating B Policy", + "category":"SAST", + "stage":"source", + "description":"This policy aims to promptly resolve reliability issues identified with a Grade B rating in SonarQube. 
It focuses on enhancing and sustaining code reliability to ensure the codebase operates consistently and reliably.", + "scheduled_policy":false, + "scriptId":"45", + "variables":"", + "conditionName":"SonarQube Reliability Rating B Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"46", + "orgId":"1", + "policyName":"Block Container Without Limits", + "category":"Cloud Security", + "stage":"deploy", + "description":"Requires containers to have memory and CPU limits set and constrains limits to be within the specified maximum values.", + "scheduled_policy":false, + "scriptId":"46", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"47", + "orgId":"1", + "policyName":"Block Container Without Request Limit", + "category":"Cloud Security", + "stage":"deploy", + "description":"Requires containers to have memory and CPU requests set and constrains requests to be within the specified maximum values.", + "scheduled_policy":false, + "scriptId":"47", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"48", + "orgId":"1", + "policyName":"SEMGREP High Severity Findings Policy", + "category":"SAST", + "stage":"source", + "description":"This policy is designed to ensure timely identification, assessment, and resolution of high-severity findings in SEMGREP analysis. It outlines the procedures and responsibilities for addressing issues that could pose significant risks to code quality and security.", + "scheduled_policy":false, + "scriptId":"48", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"49", + "orgId":"1", + "policyName":"SEMGREP Medium Severity Findings Policy", + "category":"SAST", + "stage":"source", + "description":"This policy is designed to ensure timely identification, assessment, and resolution of medium-severity findings in SEMGREP analysis. 
It outlines the procedures and responsibilities for addressing issues that could pose significant risks to code quality and security.", + "scheduled_policy":false, + "scriptId":"49", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"50", + "orgId":"1", + "policyName":"Block Undefined Container Ratios", + "category":"Cloud Security", + "stage":"deploy", + "description":"Sets a maximum ratio for container resource limits to requests.", + "scheduled_policy":false, + "scriptId":"50", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"51", + "orgId":"1", + "policyName":"SAST Integration Validation Policy", + "category":"SAST", + "stage":"source", + "description":"Ensures at least one SAST tool is configured for Source Repo.", + "scheduled_policy":false, + "scriptId":"51", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"52", + "orgId":"1", + "policyName":"SEMGREP Low Severity Findings Policy", + "category":"SAST", + "stage":"source", + "description":"This policy is designed to ensure timely identification, assessment, and resolution of low-severity findings in SEMGREP analysis. 
It outlines the procedures and responsibilities for addressing issues that could pose significant risks to code quality and security.", + "scheduled_policy":false, + "scriptId":"52", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"53", + "orgId":"1", + "policyName":"Pod Security Allow Privilege Escalation", + "category":"Pod Security", + "stage":"deploy", + "description":"Controls restricting escalation to root privileges.", + "scheduled_policy":false, + "scriptId":"53", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"54", + "orgId":"1", + "policyName":"Pod Security App Armor", + "category":"Pod Security", + "stage":"deploy", + "description":"Configures an allow-list of AppArmor profiles for use by containers.", + "scheduled_policy":false, + "scriptId":"54", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"55", + "orgId":"1", + "policyName":"Pod Security Capabilities", + "category":"Pod Security", + "stage":"deploy", + "description":"Controls Linux capabilities on containers.", + "scheduled_policy":false, + "scriptId":"55", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"56", + "orgId":"1", + "policyName":"Pod Security Flex Volumes", + "category":"Pod Security", + "stage":"deploy", + "description":"Controls the allowlist of FlexVolume drivers.", + "scheduled_policy":false, + "scriptId":"56", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"57", + "orgId":"1", + "policyName":"Pod Security Forbidden Sysctl", + "category":"Pod Security", + "stage":"deploy", + "description":"Controls the sysctl profile used by containers.", + "scheduled_policy":false, + "scriptId":"57", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"58", + "orgId":"1", + "policyName":"Pod Security FS Group", + "category":"Pod Security", + "stage":"deploy", + 
"description":"Controls allocating an FSGroup that owns the Pods volumes.", + "scheduled_policy":false, + "scriptId":"58", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"59", + "orgId":"1", + "policyName":"Pod Security Host Filesystem", + "category":"Pod Security", + "stage":"deploy", + "description":"Controls usage of the host filesystem.", + "scheduled_policy":false, + "scriptId":"59", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"60", + "orgId":"1", + "policyName":"Pod Security Host Namespace", + "category":"Pod Security", + "stage":"deploy", + "description":"Disallows sharing of host PID and IPC namespaces by pod containers.", + "scheduled_policy":false, + "scriptId":"60", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"61", + "orgId":"1", + "policyName":"Pod Security Host Network", + "category":"Pod Security", + "stage":"deploy", + "description":"Controls usage of host network namespace by pod containers. 
Specific ports must be specified.", + "scheduled_policy":false, + "scriptId":"61", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"62", + "orgId":"1", + "policyName":"Pod Security Privileged Container", + "category":"Pod Security", + "stage":"deploy", + "description":"Controls the ability of any container to enable privileged mode.", + "scheduled_policy":false, + "scriptId":"62", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"63", + "orgId":"1", + "policyName":"Pod Security Proc Mount", + "category":"Pod Security", + "stage":"deploy", + "description":"Controls the allowed procMount types for the container.", + "scheduled_policy":false, + "scriptId":"63", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"64", + "orgId":"1", + "policyName":"Pod Security Read Only Root FS", + "category":"Pod Security", + "stage":"deploy", + "description":"Requires the use of a read-only root file system by pod containers.", + "scheduled_policy":false, + "scriptId":"64", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"65", + "orgId":"1", + "policyName":"Pod Security Volume Types", + "category":"Pod Security", + "stage":"deploy", + "description":"Restricts mountable volume types to those specified by the user.", + "scheduled_policy":false, + "scriptId":"65", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"66", + "orgId":"1", + "policyName":"SonarQube Quality Gate Policy", + "category":"SAST", + "stage":"source", + "description":"The purpose of this policy is to comply with SonarQube quality gates, ensuring that code meets predefined quality and performance standards. 
It emphasizes the importance of continuous code improvement and adherence to best practices.", + "scheduled_policy":false, + "scriptId":"66", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"67", + "orgId":"1", + "policyName":"SonarQube Maintanability Rating E Policy", + "category":"SAST", + "stage":"source", + "description":"This policy is dedicated to the timely resolution of maintainability issues identified with a Grade E rating in SonarQube. It aims to enhance and sustain code maintainability, streamlining future development efforts.", + "scheduled_policy":false, + "scriptId":"67", + "variables":"", + "conditionName":"SonarQube Maintanability Rating E Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"68", + "orgId":"1", + "policyName":"SonarQube Maintanability Rating D Policy", + "category":"SAST", + "stage":"source", + "description":"This policy is dedicated to the timely resolution of maintainability issues identified with a Grade D rating in SonarQube. It aims to enhance and sustain code maintainability, streamlining future development efforts.", + "scheduled_policy":false, + "scriptId":"68", + "variables":"", + "conditionName":"SonarQube Maintanability Rating D Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"69", + "orgId":"1", + "policyName":"SonarQube Maintanability Rating C Policy", + "category":"SAST", + "stage":"source", + "description":"This policy is dedicated to the timely resolution of maintainability issues identified with a Grade C rating in SonarQube. 
It aims to enhance and sustain code maintainability, streamlining future development efforts.", + "scheduled_policy":false, + "scriptId":"69", + "variables":"", + "conditionName":"SonarQube Maintanability Rating C Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"70", + "orgId":"1", + "policyName":"SonarQube Maintanability Rating B Policy", + "category":"SAST", + "stage":"source", + "description":"This policy is dedicated to the timely resolution of maintainability issues identified with a Grade B rating in SonarQube. It aims to enhance and sustain code maintainability, streamlining future development efforts.", + "scheduled_policy":false, + "scriptId":"70", + "variables":"", + "conditionName":"SonarQube Maintanability Rating B Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"71", + "orgId":"1", + "policyName":"SonarQube Security Rating E Policy", + "category":"SAST", + "stage":"source", + "description":"This policy directs efforts towards improving code security when assigned a Grade E rating in SonarQube. It emphasizes the critical need to fortify the codebase against security threats, protecting sensitive data and preventing potential exploits.", + "scheduled_policy":false, + "scriptId":"71", + "variables":"", + "conditionName":"SonarQube Security Rating E Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"72", + "orgId":"1", + "policyName":"SonarQube Security Rating D Policy", + "category":"SAST", + "stage":"source", + "description":"This policy directs efforts towards improving code security when assigned a Grade D rating in SonarQube. 
It emphasizes the critical need to fortify the codebase against security threats, protecting sensitive data and preventing potential exploits.", + "scheduled_policy":false, + "scriptId":"72", + "variables":"", + "conditionName":"SonarQube Security Rating D Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"73", + "orgId":"1", + "policyName":"SonarQube Security Rating C Policy", + "category":"SAST", + "stage":"source", + "description":"This policy directs efforts towards improving code security when assigned a Grade C rating in SonarQube. It emphasizes the critical need to fortify the codebase against security threats, protecting sensitive data and preventing potential exploits.", + "scheduled_policy":false, + "scriptId":"73", + "variables":"", + "conditionName":"SonarQube Security Rating C Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"74", + "orgId":"1", + "policyName":"SonarQube Security Rating B Policy", + "category":"SAST", + "stage":"source", + "description":"This policy directs efforts towards improving code security when assigned a Grade B rating in SonarQube. It emphasizes the critical need to fortify the codebase against security threats, protecting sensitive data and preventing potential exploits.", + "scheduled_policy":false, + "scriptId":"74", + "variables":"", + "conditionName":"SonarQube Security Rating B Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"75", + "orgId":"1", + "policyName":"SonarQube Reliability Rating E Policy", + "category":"SAST", + "stage":"source", + "description":"This policy aims to promptly resolve reliability issues identified with a Grade E rating in SonarQube. 
It focuses on enhancing and sustaining code reliability to ensure the codebase operates consistently and reliably.", + "scheduled_policy":false, + "scriptId":"75", + "variables":"", + "conditionName":"SonarQube Reliability Rating E Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"76", + "orgId":"1", + "policyName":"High Vulnerability Prevention Policy", + "category":"Vulnerability Analysis", + "stage":"artifact", + "description":"High Severity Vulnerabilities should not be found in the artifact", + "scheduled_policy":true, + "scriptId":"76", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"77", + "orgId":"1", + "policyName":"CIS-1.1.1 Ensure that the API server pod specification file permissions are set to 600 or more restrictive", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The API server pod specification file controls various parameters that set the behavior of the API server. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "scheduled_policy":false, + "scriptId":"77", + "variables":"", + "conditionName":"CIS-1.1.1 Ensure that the API server pod specification file permissions are set to 600 or more restrictive", + "suggestion":"" + } + `, + ` + { + "policyId":"78", + "orgId":"1", + "policyName":"CIS-1.1.2 Ensure that the API server pod specification file ownership is set to root:root", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The API server pod specification file controls various parameters that set the behavior of the API server. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by root:root.", + "scheduled_policy":false, + "scriptId":"78", + "variables":"", + "conditionName":"CIS-1.1.2 Ensure that the API server pod specification file ownership is set to root:root", + "suggestion":"" + } + `, + ` + { + "policyId":"79", + "orgId":"1", + "policyName":"CIS-1.1.3 Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "scheduled_policy":false, + "scriptId":"79", + "variables":"", + "conditionName":"CIS-1.1.3 Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", + "suggestion":"" + } + `, + ` + { + "policyId":"80", + "orgId":"1", + "policyName":"CIS-1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The controller manager pod specification file controls various parameters that set the behavior of various components of the master node. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by root:root.", + "scheduled_policy":false, + "scriptId":"80", + "variables":"", + "conditionName":"CIS-1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root", + "suggestion":"" + } + `, + ` + { + "policyId":"81", + "orgId":"1", + "policyName":"CIS-1.1.5 Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The scheduler pod specification file controls various parameters that set the behavior of the Scheduler service in the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "scheduled_policy":false, + "scriptId":"81", + "variables":"", + "conditionName":"CIS-1.1.5 Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", + "suggestion":"" + } + `, + ` + { + "policyId":"82", + "orgId":"1", + "policyName":"CIS-1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The scheduler pod specification file controls various parameters that set the behavior of the kube-scheduler service in the master node. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by root:root.", + "scheduled_policy":false, + "scriptId":"82", + "variables":"", + "conditionName":"CIS-1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root", + "suggestion":"" + } + `, + ` + { + "policyId":"83", + "orgId":"1", + "policyName":"CIS-1.1.7 Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The etcd pod specification file /etc/kubernetes/manifests/etcd.yaml controls various parameters that set the behavior of the etcd service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "scheduled_policy":false, + "scriptId":"83", + "variables":"", + "conditionName":"CIS-1.1.7 Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", + "suggestion":"" + } + `, + ` + { + "policyId":"84", + "orgId":"1", + "policyName":"CIS-1.1.8 Ensure that the etcd pod specification file ownership is set to root:root", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The etcd pod specification file /etc/kubernetes/manifests/etcd.yaml controls various parameters that set the behavior of the etcd service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by root:root.", + "scheduled_policy":false, + "scriptId":"84", + "variables":"", + "conditionName":"CIS-1.1.8 Ensure that the etcd pod specification file ownership is set to root:root", + "suggestion":"" + } + `, + ` + { + "policyId":"85", + "orgId":"1", + "policyName":"CIS-1.1.9 Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be writable by only the administrators on the system.", + "scheduled_policy":false, + "scriptId":"85", + "variables":"", + "conditionName":"CIS-1.1.9 Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", + "suggestion":"" + } + `, + ` + { + "policyId":"86", + "orgId":"1", + "policyName":"CIS-1.1.10 Ensure that the Container Network Interface file ownership is set to root:root", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. 
Those files should be owned by root:root.", + "scheduled_policy":false, + "scriptId":"86", + "variables":"", + "conditionName":"CIS-1.1.10 Ensure that the Container Network Interface file ownership is set to root:root", + "suggestion":"" + } + `, + ` + { + "policyId":"87", + "orgId":"1", + "policyName":"CIS-1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world.", + "scheduled_policy":false, + "scriptId":"87", + "variables":"", + "conditionName":"CIS-1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive", + "suggestion":"" + } + `, + ` + { + "policyId":"88", + "orgId":"1", + "policyName":"CIS-1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by etcd:etcd.", + "scheduled_policy":false, + "scriptId":"88", + "variables":"", + "conditionName":"CIS-1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd", + "suggestion":"" + } + `, + ` + { + "policyId":"89", + "orgId":"1", + "policyName":"CIS-1.1.13 Ensure that the admin.conf file permissions are set to 600", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The admin.conf is the administrator kubeconfig file defining various settings for the administration of the cluster. 
This file contains private key and respective certificate allowed to fully manage the cluster. You should restrict its file permissions to maintain the integrity and confidentiality of the file. The file should be readable and writable by only the administrators on the system.", + "scheduled_policy":false, + "scriptId":"89", + "variables":"", + "conditionName":"CIS-1.1.13 Ensure that the admin.conf file permissions are set to 600", + "suggestion":"" + } + `, + ` + { + "policyId":"90", + "orgId":"1", + "policyName":"CIS-1.1.14 Ensure that the admin.conf file ownership is set to root:root", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The admin.conf file contains the admin credentials for the cluster. You should set its file ownership to maintain the integrity and confidentiality of the file. The file should be owned by root:root.", + "scheduled_policy":false, + "scriptId":"90", + "variables":"", + "conditionName":"CIS-1.1.14 Ensure that the admin.conf file ownership is set to root:root", + "suggestion":"" + } + `, + ` + { + "policyId":"91", + "orgId":"1", + "policyName":"CIS-1.1.15 Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The scheduler.conf file is the kubeconfig file for the Scheduler. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "scheduled_policy":false, + "scriptId":"91", + "variables":"", + "conditionName":"CIS-1.1.15 Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", + "suggestion":"" + } + `, + ` + { + "policyId":"92", + "orgId":"1", + "policyName":"CIS-1.1.16 Ensure that the scheduler.conf file ownership is set to root:root", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The scheduler.conf file is the kubeconfig file for the Scheduler. 
You should set its file ownership to maintain the integrity of the file. The file should be owned by root:root.", + "scheduled_policy":false, + "scriptId":"92", + "variables":"", + "conditionName":"CIS-1.1.16 Ensure that the scheduler.conf file ownership is set to root:root", + "suggestion":"" + } + `, + ` + { + "policyId":"93", + "orgId":"1", + "policyName":"CIS-1.1.17 Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The controller-manager.conf file is the kubeconfig file for the Controller Manager. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "scheduled_policy":false, + "scriptId":"93", + "variables":"", + "conditionName":"CIS-1.1.17 Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", + "suggestion":"" + } + `, + ` + { + "policyId":"94", + "orgId":"1", + "policyName":"CIS-1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The controller-manager.conf file is the kubeconfig file for the Controller Manager. You should set its file ownership to maintain the integrity of the file. The file should be owned by root:root.", + "scheduled_policy":false, + "scriptId":"94", + "variables":"", + "conditionName":"CIS-1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root", + "suggestion":"" + } + `, + ` + { + "policyId":"95", + "orgId":"1", + "policyName":"CIS-1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Kubernetes makes use of a number of certificates as part of its operation. 
You should set the ownership of the directory containing the PKI information and all files in that directory to maintain their integrity. The directory and files should be owned by root:root.", + "scheduled_policy":false, + "scriptId":"95", + "variables":"", + "conditionName":"CIS-1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root", + "suggestion":"" + } + `, + ` + { + "policyId":"96", + "orgId":"1", + "policyName":"CIS-1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Kubernetes makes use of a number of certificate files as part of the operation of its components. The permissions on these files should be set to 600 or more restrictive to protect their integrity.", + "scheduled_policy":false, + "scriptId":"96", + "variables":"", + "conditionName":"CIS-1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", + "suggestion":"" + } + `, + ` + { + "policyId":"97", + "orgId":"1", + "policyName":"CIS-1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Kubernetes makes use of a number of key files as part of the operation of its components. The permissions on these files should be set to 600 to protect their integrity and confidentiality.", + "scheduled_policy":false, + "scriptId":"97", + "variables":"", + "conditionName":"CIS-1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600", + "suggestion":"" + } + `, + ` + { + "policyId":"98", + "orgId":"1", + "policyName":"CIS-1.2.1 Ensure that the API Server --anonymous-auth argument is set to false", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. 
These requests are then served by the API server. You should rely on authentication to authorize access and disallow anonymous requests. If you are using RBAC authorization, it is generally considered reasonable to allow anonymous access to the API Server for health checks and discovery purposes, and hence this recommendation is not scored. However, you should consider whether anonymous discovery is an acceptable risk for your purposes.", + "scheduled_policy":false, + "scriptId":"98", + "variables":"", + "conditionName":"CIS-1.2.1 Ensure that the API Server --anonymous-auth argument is set to false", + "suggestion":"" + } + `, + ` + { + "policyId":"99", + "orgId":"1", + "policyName":"CIS-1.2.2 Ensure that the API Server --token-auth-file parameter is not set", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The token-based authentication utilizes static tokens to authenticate requests to the apiserver. The tokens are stored in clear-text in a file on the apiserver, and cannot be revoked or rotated without restarting the apiserver. Hence, do not use static token-based authentication.", + "scheduled_policy":false, + "scriptId":"99", + "variables":"", + "conditionName":"CIS-1.2.2 Ensure that the API Server --token-auth-file parameter is not set", + "suggestion":"" + } + `, + ` + { + "policyId":"100", + "orgId":"1", + "policyName":"CIS-1.2.3 Ensure that the API Server --DenyServiceExternalIPs is not set", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"This admission controller rejects all net-new usage of the Service field externalIPs. 
This feature is very powerful ", + "scheduled_policy":false, + "scriptId":"100", + "variables":"", + "conditionName":"CIS-1.2.3 Ensure that the API Server --DenyServiceExternalIPs is not set", + "suggestion":"" + } + `, + ` + { + "policyId":"101", + "orgId":"1", + "policyName":"CIS-1.2.4 Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The apiserver, by default, does not authenticate itself to the kubelets HTTPS endpoints. The requests from the apiserver are treated anonymously. You should set up certificate-based kubelet authentication to ensure that the apiserver authenticates itself to kubelets when submitting requests.", + "scheduled_policy":false, + "scriptId":"101", + "variables":"", + "conditionName":"CIS-1.2.4 Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"102", + "orgId":"1", + "policyName":"CIS-1.2.5 Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching ", + "scheduled_policy":false, + "scriptId":"102", + "variables":"", + "conditionName":"CIS-1.2.5 Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"103", + "orgId":"1", + "policyName":"CIS-1.2.6 Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The API Server, can be configured to allow all requests. 
This mode should not be used on any production cluster.", + "scheduled_policy":false, + "scriptId":"103", + "variables":"", + "conditionName":"CIS-1.2.6 Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow", + "suggestion":"" + } + `, + ` + { + "policyId":"104", + "orgId":"1", + "policyName":"CIS-1.2.7 Ensure that the API Server --authorization-mode argument includes Node", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The Node authorization mode only allows kubelets to read Secret, ConfigMap, PersistentVolume, and PersistentVolumeClaim objects associated with their nodes.", + "scheduled_policy":false, + "scriptId":"104", + "variables":"", + "conditionName":"CIS-1.2.7 Ensure that the API Server --authorization-mode argument includes Node", + "suggestion":"" + } + `, + ` + { + "policyId":"105", + "orgId":"1", + "policyName":"CIS-1.2.8 Ensure that the API Server --authorization-mode argument includes RBAC", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Role Based Access Control ", + "scheduled_policy":false, + "scriptId":"105", + "variables":"", + "conditionName":"CIS-1.2.8 Ensure that the API Server --authorization-mode argument includes RBAC", + "suggestion":"" + } + `, + ` + { + "policyId":"106", + "orgId":"1", + "policyName":"CIS-1.2.9 Ensure that the admission control plugin EventRateLimit is set", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Using EventRateLimit admission control enforces a limit on the number of events that the API Server will accept in a given time slice. A misbehaving workload could overwhelm and DoS the API Server, making it unavailable. This particularly applies to a multi-tenant cluster, where there might be a small percentage of misbehaving tenants which could have a significant impact on the performance of the cluster overall. Hence, it is recommended to limit the rate of events that the API server will accept. 
Note: This is an Alpha feature in the Kubernetes 1.15 release.", + "scheduled_policy":false, + "scriptId":"106", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"107", + "orgId":"1", + "policyName":"CIS-1.2.10 Ensure that the admission control plugin AlwaysAdmit is not set", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Setting admission control plugin AlwaysAdmit allows all requests and do not filter any requests. The AlwaysAdmit admission controller was deprecated in Kubernetes v1.13. Its behavior was equivalent to turning off all admission controllers.", + "scheduled_policy":false, + "scriptId":"107", + "variables":"", + "conditionName":"CIS-1.2.10 Ensure that the admission control plugin AlwaysAdmit is not set", + "suggestion":"" + } + `, + ` + { + "policyId":"108", + "orgId":"1", + "policyName":"CIS-1.2.11 Ensure that the admission control plugin AlwaysPullImages is set", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Setting admission control policy to AlwaysPullImages forces every new pod to pull the required images every time. In a multi-tenant cluster users can be assured that their private images can only be used by those who have the credentials to pull them. Without this admission control policy, once an image has been pulled to a node, any pod from any user can use it simply by knowing the images name, without any authorization check against the image ownership. 
When this plug-in is enabled, images are always pulled prior to starting containers, which means valid credentials are required.", + "scheduled_policy":false, + "scriptId":"108", + "variables":"", + "conditionName":"CIS-1.2.11 Ensure that the admission control plugin AlwaysPullImages is set", + "suggestion":"" + } + `, + ` + { + "policyId":"109", + "orgId":"1", + "policyName":"CIS-1.2.12 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"SecurityContextDeny can be used to provide a layer of security for clusters which do not have PodSecurityPolicies enabled.", + "scheduled_policy":false, + "scriptId":"109", + "variables":"", + "conditionName":"CIS-1.2.12 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", + "suggestion":"" + } + `, + ` + { + "policyId":"110", + "orgId":"1", + "policyName":"CIS-1.2.13 Ensure that the admission control plugin ServiceAccount is set", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"When you create a pod, if you do not specify a service account, it is automatically assigned the default service account in the same namespace. You should create your own service account and let the API server manage its security tokens.", + "scheduled_policy":false, + "scriptId":"110", + "variables":"", + "conditionName":"CIS-1.2.13 Ensure that the admission control plugin ServiceAccount is set", + "suggestion":"" + } + `, + ` + { + "policyId":"111", + "orgId":"1", + "policyName":"CIS-1.2.14 Ensure that the admission control plugin NamespaceLifecycle is set", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Setting admission control policy to NamespaceLifecycle ensures that objects cannot be created in non-existent namespaces, and that namespaces undergoing termination are not used for creating the new objects. 
This is recommended to enforce the integrity of the namespace termination process and also for the availability of the newer objects.", + "scheduled_policy":false, + "scriptId":"111", + "variables":"", + "conditionName":"CIS-1.2.14 Ensure that the admission control plugin NamespaceLifecycle is set", + "suggestion":"" + } + `, + ` + { + "policyId":"112", + "orgId":"1", + "policyName":"CIS-1.2.15 Ensure that the admission control plugin NodeRestriction is set", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Using the NodeRestriction plug-in ensures that the kubelet is restricted to the Node and Pod objects that it could modify as defined. Such kubelets will only be allowed to modify their own Node API object, and only modify Pod API objects that are bound to their node.", + "scheduled_policy":false, + "scriptId":"112", + "variables":"", + "conditionName":"CIS-1.2.15 Ensure that the admission control plugin NodeRestriction is set", + "suggestion":"" + } + `, + ` + { + "policyId":"113", + "orgId":"1", + "policyName":"CIS-1.2.16 Ensure that the API Server --secure-port argument is not set to 0", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The secure port is used to serve https with authentication and authorization. If you disable it, no https traffic is served and all traffic is served unencrypted.", + "scheduled_policy":false, + "scriptId":"113", + "variables":"", + "conditionName":"CIS-1.2.16 Ensure that the API Server --secure-port argument is not set to 0", + "suggestion":"" + } + `, + ` + { + "policyId":"114", + "orgId":"1", + "policyName":"CIS-1.2.17 Ensure that the API Server --profiling argument is set to false", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. 
If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "scheduled_policy":false, + "scriptId":"114", + "variables":"", + "conditionName":"CIS-1.2.17 Ensure that the API Server --profiling argument is set to false", + "suggestion":"" + } + `, + ` + { + "policyId":"115", + "orgId":"1", + "policyName":"CIS-1.2.18 Ensure that the API Server --audit-log-path argument is set", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Auditing the Kubernetes API Server provides a security-relevant chronological set of records documenting the sequence of activities that have affected system by individual users, administrators or other components of the system. Even though currently, Kubernetes provides only basic audit capabilities, it should be enabled. You can enable it by setting an appropriate audit log path.", + "scheduled_policy":false, + "scriptId":"115", + "variables":"", + "conditionName":"CIS-1.2.18 Ensure that the API Server --audit-log-path argument is set", + "suggestion":"" + } + `, + ` + { + "policyId":"116", + "orgId":"1", + "policyName":"CIS-1.2.19 Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Retaining logs for at least 30 days ensures that you can go back in time and investigate or correlate any events. 
Set your audit log retention period to 30 days or as per your business requirements.", + "scheduled_policy":false, + "scriptId":"116", + "variables":"", + "conditionName":"CIS-1.2.19 Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"117", + "orgId":"1", + "policyName":"CIS-1.2.20 Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. For example, if you have set file size of 100 MB and the number of old log files to keep as 10, you would approximate have 1 GB of log data that you could potentially use for your analysis.", + "scheduled_policy":false, + "scriptId":"117", + "variables":"", + "conditionName":"CIS-1.2.20 Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"118", + "orgId":"1", + "policyName":"CIS-1.2.21 Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. 
If you have set file size of 100 MB and the number of old log files to keep as 10, you would approximate have 1 GB of log data that you could potentially use for your analysis.", + "scheduled_policy":false, + "scriptId":"118", + "variables":"", + "conditionName":"CIS-1.2.21 Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"119", + "orgId":"1", + "policyName":"CIS-1.2.22 Ensure that the API Server --request-timeout argument is set as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Setting global request timeout allows extending the API server request timeout limit to a duration appropriate to the users connection speed. By default, it is set to 60 seconds which might be problematic on slower connections making cluster resources inaccessible once the data volume for requests exceeds what can be transmitted in 60 seconds. But, setting this timeout limit to be too large can exhaust the API server resources making it prone to Denial-of-Service attack. Hence, it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed.", + "scheduled_policy":false, + "scriptId":"119", + "variables":"", + "conditionName":"CIS-1.2.22 Ensure that the API Server --request-timeout argument is set as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"120", + "orgId":"1", + "policyName":"CIS-1.2.23 Ensure that the API Server --service-account-lookup argument is set to true", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"If --service-account-lookup is not enabled, the apiserver only verifies that the authentication token is valid, and does not validate that the service account token mentioned in the request is actually present in etcd. This allows using a service account token even after the corresponding service account is deleted. 
This is an example of time of check to time of use security issue.", + "scheduled_policy":false, + "scriptId":"120", + "variables":"", + "conditionName":"CIS-1.2.23 Ensure that the API Server --service-account-lookup argument is set to true", + "suggestion":"" + } + `, + ` + { + "policyId":"121", + "orgId":"1", + "policyName":"CIS-1.2.24 Ensure that the API Server --service-account-key-file argument is set as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"By default, if no --service-account-key-file is specified to the apiserver, it uses the private key from the TLS serving certificate to verify service account tokens. To ensure that the keys for service account tokens could be rotated as needed, a separate public/private key pair should be used for signing service account tokens. Hence, the public key should be specified to the apiserver with --service-account-key-file.", + "scheduled_policy":false, + "scriptId":"121", + "variables":"", + "conditionName":"CIS-1.2.24 Ensure that the API Server --service-account-key-file argument is set as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"122", + "orgId":"1", + "policyName":"CIS-1.2.25 Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. 
This requires the API server to identify itself to the etcd server using a client certificate and key.", + "scheduled_policy":false, + "scriptId":"122", + "variables":"", + "conditionName":"CIS-1.2.25 Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"123", + "orgId":"1", + "policyName":"CIS-1.2.26 Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic.", + "scheduled_policy":false, + "scriptId":"123", + "variables":"", + "conditionName":"CIS-1.2.26 Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"124", + "orgId":"1", + "policyName":"CIS-1.2.27 Ensure that the API Server --client-ca-file argument is set as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic. 
If --client-ca-file argument is set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate.", + "scheduled_policy":false, + "scriptId":"124", + "variables":"", + "conditionName":"CIS-1.2.27 Ensure that the API Server --client-ca-file argument is set as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"125", + "orgId":"1", + "policyName":"CIS-1.2.28 Ensure that the API Server --etcd-cafile argument is set as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a SSL Certificate Authority file.", + "scheduled_policy":false, + "scriptId":"125", + "variables":"", + "conditionName":"CIS-1.2.28 Ensure that the API Server --etcd-cafile argument is set as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"126", + "orgId":"1", + "policyName":"CIS-1.2.29 Ensure that the API Server --encryption-provider-config argument is set as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
These objects are sensitive in nature and should be encrypted at rest to avoid any disclosures.", + "scheduled_policy":false, + "scriptId":"126", + "variables":"", + "conditionName":"CIS-1.2.29 Ensure that the API Server --encryption-provider-config argument is set as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"127", + "orgId":"1", + "policyName":"CIS-1.2.30 Ensure that encryption providers are appropriately configured", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Where etcd encryption is used, it is important to ensure that the appropriate set of encryption providers is used. Currently, the aescbc, kms and secretbox are likely to be appropriate options.", + "scheduled_policy":false, + "scriptId":"127", + "variables":"", + "conditionName":"CIS-1.2.30 Ensure that encryption providers are appropriately configured", + "suggestion":"" + } + `, + ` + { + "policyId":"128", + "orgId":"1", + "policyName":"CIS-1.2.31 Ensure that the API Server only makes use of Strong Cryptographic Ciphers", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", + "scheduled_policy":false, + "scriptId":"128", + "variables":"", + "conditionName":"CIS-1.2.31 Ensure that the API Server only makes use of Strong Cryptographic Ciphers", + "suggestion":"" + } + `, + ` + { + "policyId":"129", + "orgId":"1", + "policyName":"CIS-1.3.1 Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Garbage collection is important to ensure sufficient resource availability and avoiding degraded performance and availability. 
In the worst case, the system might crash or just be unusable for a long period of time. The current setting for garbage collection is 12,500 terminated pods which might be too high for your system to sustain. Based on your system resources and tests, choose an appropriate threshold value to activate garbage collection.", + "scheduled_policy":false, + "scriptId":"129", + "variables":"", + "conditionName":"CIS-1.3.1 Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"130", + "orgId":"1", + "policyName":"CIS-1.3.2 Ensure that the Controller Manager --profiling argument is set to false", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "scheduled_policy":false, + "scriptId":"130", + "variables":"", + "conditionName":"CIS-1.3.2 Ensure that the Controller Manager --profiling argument is set to false", + "suggestion":"" + } + `, + ` + { + "policyId":"131", + "orgId":"1", + "policyName":"CIS-1.3.3 Ensure that the Controller Manager --use-service-account-credentials argument is set to true", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The controller manager creates a service account per controller in the kube-system namespace, generates a credential for it, and builds a dedicated API client with that service account credential for each controller loop to use. Setting the --use-service-account-credentials to true runs each control loop within the controller manager using a separate service account credential. 
When used in combination with RBAC, this ensures that the control loops run with the minimum permissions required to perform their intended tasks.", + "scheduled_policy":false, + "scriptId":"131", + "variables":"", + "conditionName":"CIS-1.3.3 Ensure that the Controller Manager --use-service-account-credentials argument is set to true", + "suggestion":"" + } + `, + ` + { + "policyId":"132", + "orgId":"1", + "policyName":"CIS-1.3.4 Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"To ensure that keys for service account tokens can be rotated as needed, a separate public/private key pair should be used for signing service account tokens. The private key should be specified to the controller manager with --service-account-private-key-file as appropriate.", + "scheduled_policy":false, + "scriptId":"132", + "variables":"", + "conditionName":"CIS-1.3.4 Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"133", + "orgId":"1", + "policyName":"CIS-1.3.5 Ensure that the Controller Manager --root-ca-file argument is set as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Processes running within pods that need to contact the API server must verify the API servers serving certificate. Failing to do so could be a subject to man-in-the-middle attacks. 
Providing the root certificate for the API servers serving certificate to the controller manager with the --root-ca-file argument allows the controller manager to inject the trusted bundle into pods so that they can verify TLS connections to the API server.", + "scheduled_policy":false, + "scriptId":"133", + "variables":"", + "conditionName":"CIS-1.3.5 Ensure that the Controller Manager --root-ca-file argument is set as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"134", + "orgId":"1", + "policyName":"CIS-1.3.6 Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"RotateKubeletServerCertificate causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad. Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool ", + "scheduled_policy":false, + "scriptId":"134", + "variables":"", + "conditionName":"CIS-1.3.6 Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true", + "suggestion":"" + } + `, + ` + { + "policyId":"135", + "orgId":"1", + "policyName":"CIS-1.3.7 Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The Controller Manager API service which runs on port 10252/TCP by default is used for health and metrics information and is available without authentication or encryption. 
As such it should only be bound to a localhost interface, to minimize the clusters attack surface.", + "scheduled_policy":false, + "scriptId":"135", + "variables":"", + "conditionName":"CIS-1.3.7 Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1", + "suggestion":"" + } + `, + ` + { + "policyId":"136", + "orgId":"1", + "policyName":"CIS-1.4.1 Ensure that the Scheduler --profiling argument is set to false", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "scheduled_policy":false, + "scriptId":"136", + "variables":"", + "conditionName":"CIS-1.4.1 Ensure that the Scheduler --profiling argument is set to false", + "suggestion":"" + } + `, + ` + { + "policyId":"137", + "orgId":"1", + "policyName":"CIS-1.4.2 Ensure that the Scheduler --bind-address argument is set to 127.0.0.1", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The Scheduler API service which runs on port 10251/TCP by default is used for health and metrics information and is available without authentication or encryption. 
As such it should only be bound to a localhost interface, to minimize the clusters attack surface.", + "scheduled_policy":false, + "scriptId":"137", + "variables":"", + "conditionName":"CIS-1.4.2 Ensure that the Scheduler --bind-address argument is set to 127.0.0.1", + "suggestion":"" + } + `, + ` + { + "policyId":"138", + "orgId":"1", + "policyName":"CIS-2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted in transit.", + "scheduled_policy":false, + "scriptId":"138", + "variables":"", + "conditionName":"CIS-2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"139", + "orgId":"1", + "policyName":"CIS-2.2 Ensure that the --client-cert-auth argument is set to true", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", + "scheduled_policy":false, + "scriptId":"139", + "variables":"", + "conditionName":"CIS-2.2 Ensure that the --client-cert-auth argument is set to true", + "suggestion":"" + } + `, + ` + { + "policyId":"140", + "orgId":"1", + "policyName":"CIS-2.3 Ensure that the --auto-tls argument is not set to true", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", + "scheduled_policy":false, + "scriptId":"140", + "variables":"", + "conditionName":"CIS-2.3 Ensure that the --auto-tls argument is not set to true", + "suggestion":"" + } + `, + ` + { + "policyId":"141", + "orgId":"1", + "policyName":"CIS-2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted in transit and also amongst peers in the etcd clusters.", + "scheduled_policy":false, + "scriptId":"141", + "variables":"", + "conditionName":"CIS-2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"142", + "orgId":"1", + "policyName":"CIS-2.5 Ensure that the --peer-client-cert-auth argument is set to true", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster.", + "scheduled_policy":false, + "scriptId":"142", + "variables":"", + "conditionName":"CIS-2.5 Ensure that the --peer-client-cert-auth argument is set to true", + "suggestion":"" + } + `, + ` + { + "policyId":"143", + "orgId":"1", + "policyName":"CIS-2.6 Ensure that the --peer-auto-tls argument is not set to true", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster. Hence, do not use self-signed certificates for authentication.", + "scheduled_policy":false, + "scriptId":"143", + "variables":"", + "conditionName":"CIS-2.6 Ensure that the --peer-auto-tls argument is not set to true", + "suggestion":"" + } + `, + ` + { + "policyId":"144", + "orgId":"1", + "policyName":"CIS-2.7 Ensure that a unique Certificate Authority is used for etcd", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. Its access should be restricted to specifically designated clients and peers only. Authentication to etcd is based on whether the certificate presented was issued by a trusted certificate authority. There is no checking of certificate attributes such as common name or subject alternative name. 
As such, if any attackers were able to gain access to any certificate issued by the trusted certificate authority, they would be able to gain full access to the etcd database.", + "scheduled_policy":false, + "scriptId":"144", + "variables":"", + "conditionName":"CIS-2.7 Ensure that a unique Certificate Authority is used for etcd", + "suggestion":"" + } + `, + ` + { + "policyId":"145", + "orgId":"1", + "policyName":"CIS-3.2.1 Ensure that a minimal audit policy is created", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Kubernetes can audit the details of requests made to the API server. The --audit-policy-file flag must be set for this logging to be enabled.", + "scheduled_policy":false, + "scriptId":"145", + "variables":"", + "conditionName":"CIS-3.2.1 Ensure that a minimal audit policy is created", + "suggestion":"" + } + `, + ` + { + "policyId":"146", + "orgId":"1", + "policyName":"CIS-3.2.2 Ensure that the audit policy covers key security concerns", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Security audit logs should cover access and modification of key resources in the cluster, to enable them to form an effective part of a security environment.", + "scheduled_policy":false, + "scriptId":"146", + "variables":"", + "conditionName":"CIS-3.2.2 Ensure that the audit policy covers key security concerns", + "suggestion":"" + } + `, + ` + { + "policyId":"147", + "orgId":"1", + "policyName":"CIS-4.1.1 Ensure that the kubelet service file permissions are set to 600 or more restrictive", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The kubelet service file controls various parameters that set the behavior of the kubelet service in the worker node. You should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", + "scheduled_policy":false, + "scriptId":"147", + "variables":"", + "conditionName":"CIS-4.1.1 Ensure that the kubelet service file permissions are set to 600 or more restrictive", + "suggestion":"" + } + `, + ` + { + "policyId":"148", + "orgId":"1", + "policyName":"CIS-4.1.2 Ensure that the kubelet service file ownership is set to root:root", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The kubelet service file controls various parameters that set the behavior of the kubelet service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by root:root.", + "scheduled_policy":false, + "scriptId":"148", + "variables":"", + "conditionName":"CIS-4.1.2 Ensure that the kubelet service file ownership is set to root:root", + "suggestion":"" + } + `, + ` + { + "policyId":"149", + "orgId":"1", + "policyName":"CIS-4.1.3 If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The kube-proxy kubeconfig file controls various parameters of the kube-proxy service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system. It is possible to run kube-proxy with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. 
In this case, there is no proxy kubeconfig file.", + "scheduled_policy":false, + "scriptId":"149", + "variables":"", + "conditionName":"CIS-4.1.3 If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", + "suggestion":"" + } + `, + ` + { + "policyId":"150", + "orgId":"1", + "policyName":"CIS-4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The kubeconfig file for kube-proxy controls various parameters for the kube-proxy service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by root:root.", + "scheduled_policy":false, + "scriptId":"150", + "variables":"", + "conditionName":"CIS-4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root", + "suggestion":"" + } + `, + ` + { + "policyId":"151", + "orgId":"1", + "policyName":"CIS-4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The kubelet.conf file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "scheduled_policy":false, + "scriptId":"151", + "variables":"", + "conditionName":"CIS-4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", + "suggestion":"" + } + `, + ` + { + "policyId":"152", + "orgId":"1", + "policyName":"CIS-4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The kubelet.conf file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. 
You should set its file ownership to maintain the integrity of the file. The file should be owned by root:root.", + "scheduled_policy":false, + "scriptId":"152", + "variables":"", + "conditionName":"CIS-4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", + "suggestion":"" + } + `, + ` + { + "policyId":"153", + "orgId":"1", + "policyName":"CIS-4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The certificate authorities file controls the authorities used to validate API requests. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "scheduled_policy":false, + "scriptId":"153", + "variables":"", + "conditionName":"CIS-4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive", + "suggestion":"" + } + `, + ` + { + "policyId":"154", + "orgId":"1", + "policyName":"CIS-4.1.8 Ensure that the client certificate authorities file ownership is set to root:root", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The certificate authorities file controls the authorities used to validate API requests. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by root:root.", + "scheduled_policy":false, + "scriptId":"154", + "variables":"", + "conditionName":"CIS-4.1.8 Ensure that the client certificate authorities file ownership is set to root:root", + "suggestion":"" + } + `, + ` + { + "policyId":"155", + "orgId":"1", + "policyName":"CIS-4.1.9 If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The kubelet reads various parameters, including security settings, from a config file specified by the --config argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "scheduled_policy":false, + "scriptId":"155", + "variables":"", + "conditionName":"CIS-4.1.9 If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", + "suggestion":"" + } + `, + ` + { + "policyId":"156", + "orgId":"1", + "policyName":"CIS-4.1.10 If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The kubelet reads various parameters, including security settings, from a config file specified by the --config argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. 
The file should be owned by root:root.", + "scheduled_policy":false, + "scriptId":"156", + "variables":"", + "conditionName":"CIS-4.1.10 If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", + "suggestion":"" + } + `, + ` + { + "policyId":"157", + "orgId":"1", + "policyName":"CIS-4.2.1 Ensure that the --anonymous-auth argument is set to false", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", + "scheduled_policy":false, + "scriptId":"157", + "variables":"", + "conditionName":"CIS-4.2.1 Ensure that the --anonymous-auth argument is set to false", + "suggestion":"" + } + `, + ` + { + "policyId":"158", + "orgId":"1", + "policyName":"CIS-4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", + "scheduled_policy":false, + "scriptId":"158", + "variables":"", + "conditionName":"CIS-4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "suggestion":"" + } + `, + ` + { + "policyId":"159", + "orgId":"1", + "policyName":"CIS-4.2.3 Ensure that the --client-ca-file argument is set as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. 
These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", + "scheduled_policy":false, + "scriptId":"159", + "variables":"", + "conditionName":"CIS-4.2.3 Ensure that the --client-ca-file argument is set as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"160", + "orgId":"1", + "policyName":"CIS-4.2.4 Verify that the --read-only-port argument is set to 0", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", + "scheduled_policy":false, + "scriptId":"160", + "variables":"", + "conditionName":"CIS-4.2.4 Verify that the --read-only-port argument is set to 0", + "suggestion":"" + } + `, + ` + { + "policyId":"161", + "orgId":"1", + "policyName":"CIS-4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports. Note: By default, --streaming-connection-idle-timeout is set to 4 hours which might be too high for your environment. 
Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", + "scheduled_policy":false, + "scriptId":"161", + "variables":"", + "conditionName":"CIS-4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "suggestion":"" + } + `, + ` + { + "policyId":"162", + "orgId":"1", + "policyName":"CIS-4.2.6 Ensure that the --protect-kernel-defaults argument is set to true", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", + "scheduled_policy":false, + "scriptId":"162", + "variables":"", + "conditionName":"CIS-4.2.6 Ensure that the --protect-kernel-defaults argument is set to true", + "suggestion":"" + } + `, + ` + { + "policyId":"163", + "orgId":"1", + "policyName":"CIS-4.2.7 Ensure that the --make-iptables-util-chains argument is set to true", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. 
You might have iptables rules too restrictive or too open.", + "scheduled_policy":false, + "scriptId":"163", + "variables":"", + "conditionName":"CIS-4.2.7 Ensure that the --make-iptables-util-chains argument is set to true", + "suggestion":"" + } + `, + ` + { + "policyId":"164", + "orgId":"1", + "policyName":"CIS-4.2.8 Ensure that the --hostname-override argument is not set", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs.", + "scheduled_policy":false, + "scriptId":"164", + "variables":"", + "conditionName":"CIS-4.2.8 Ensure that the --hostname-override argument is not set", + "suggestion":"" + } + `, + ` + { + "policyId":"165", + "orgId":"1", + "policyName":"CIS-4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"It is important to capture all events and not restrict event creation. 
Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", + "scheduled_policy":false, + "scriptId":"165", + "variables":"", + "conditionName":"CIS-4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", + "suggestion":"" + } + `, + ` + { + "policyId":"166", + "orgId":"1", + "policyName":"CIS-4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", + "scheduled_policy":false, + "scriptId":"166", + "variables":"", + "conditionName":"CIS-4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "suggestion":"" + } + `, + ` + { + "policyId":"167", + "orgId":"1", + "policyName":"CIS-4.2.11 Ensure that the --rotate-certificates argument is not set to false", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The --rotate-certificates setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that there is no downtime due to expired certificates and thus addressing availability in the CIA security triad. Note: This recommendation only applies if you let kubelets get their certificates from the API server. 
In case your kubelet certificates come from an outside authority/tool ", + "scheduled_policy":false, + "scriptId":"167", + "variables":"", + "conditionName":"CIS-4.2.11 Ensure that the --rotate-certificates argument is not set to false", + "suggestion":"" + } + `, + ` + { + "policyId":"168", + "orgId":"1", + "policyName":"CIS-4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"RotateKubeletServerCertificate causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad. Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool ", + "scheduled_policy":false, + "scriptId":"168", + "variables":"", + "conditionName":"CIS-4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true", + "suggestion":"" + } + `, + ` + { + "policyId":"169", + "orgId":"1", + "policyName":"CIS-4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. 
By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", + "scheduled_policy":false, + "scriptId":"169", + "variables":"", + "conditionName":"CIS-4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", + "suggestion":"" + } + `, + ` + { + "policyId":"170", + "orgId":"1", + "policyName":"CIS-5.1.1 Ensure that the cluster-admin role is only used where required", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as cluster-admin provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as cluster-admin allow super-user access to perform any action on any resource. When used in a ClusterRoleBinding, it gives full control over every resource in the cluster and in all namespaces. When used in a RoleBinding, it gives full control over every resource in the rolebindings namespace, including the namespace itself.", + "scheduled_policy":false, + "scriptId":"170", + "variables":"", + "conditionName":"CIS-5.1.1 Ensure that the cluster-admin role is only used where required", + "suggestion":"" + } + `, + ` + { + "policyId":"171", + "orgId":"1", + "policyName":"CIS-5.1.2 Minimize access to secrets", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", + "scheduled_policy":false, + "scriptId":"171", + "variables":"", + "conditionName":"CIS-5.1.2 Minimize access to secrets", + "suggestion":"" + } + `, + ` + { + "policyId":"172", + "orgId":"1", + "policyName":"CIS-5.1.3 Minimize wildcard use in Roles and ClusterRoles", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The principle of least 
privilege recommends that users are provided only the access required for their role and nothing more. The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "scheduled_policy":false, + "scriptId":"172", + "variables":"", + "conditionName":"CIS-5.1.3 Minimize wildcard use in Roles and ClusterRoles", + "suggestion":"" + } + `, + ` + { + "policyId":"173", + "orgId":"1", + "policyName":"CIS-5.1.4 Minimize access to create pods", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "scheduled_policy":false, + "scriptId":"173", + "variables":"", + "conditionName":"CIS-5.1.4 Minimize access to create pods", + "suggestion":"" + } + `, + ` + { + "policyId":"174", + "orgId":"1", + "policyName":"CIS-5.1.5 Ensure that default service accounts are not actively used", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Kubernetes provides a default service account which is used by cluster workloads where no specific service account is assigned to the pod. Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. 
The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", + "scheduled_policy":false, + "scriptId":"174", + "variables":"", + "conditionName":"CIS-5.1.5 Ensure that default service accounts are not actively used", + "suggestion":"" + } + `, + ` + { + "policyId":"175", + "orgId":"1", + "policyName":"CIS-5.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster. Avoiding mounting these tokens removes this attack avenue.", + "scheduled_policy":false, + "scriptId":"175", + "variables":"", + "conditionName":"CIS-5.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "suggestion":"" + } + `, + ` + { + "policyId":"176", + "orgId":"1", + "policyName":"CIS-5.1.8 Limit use of the Bind", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level. 
Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", + "scheduled_policy":false, + "scriptId":"176", + "variables":"", + "conditionName":"CIS-5.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", + "suggestion":"" + } + `, + ` + { + "policyId":"177", + "orgId":"1", + "policyName":"CIS-5.2.1 Ensure that the cluster has at least one active policy control mechanism in place", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Without an active policy control mechanism, it is not possible to limit the use of containers with access to underlying cluster nodes, via mechanisms like privileged containers, or the use of hostPath volume mounts.", + "scheduled_policy":false, + "scriptId":"177", + "variables":"", + "conditionName":"CIS-5.2.1 Ensure that the cluster has at least one active policy control mechanism in place", + "suggestion":"" + } + `, + ` + { + "policyId":"178", + "orgId":"1", + "policyName":"CIS-5.2.2 Minimize the admission of privileged containers", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices. There should be at least one admission control policy defined which does not permit privileged containers. 
If you need to run privileged containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "scheduled_policy":false, + "scriptId":"178", + "variables":"", + "conditionName":"CIS-5.2.2 Minimize the admission of privileged containers", + "suggestion":"" + } + `, + ` + { + "policyId":"179", + "orgId":"1", + "policyName":"CIS-5.2.3 Minimize the admission of containers wishing to share the host process ID namespace", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"A container running in the hosts PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container. There should be at least one admission control policy defined which does not permit containers to share the host PID namespace. If you need to run containers which require hostPID, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "scheduled_policy":false, + "scriptId":"179", + "variables":"", + "conditionName":"CIS-5.2.3 Minimize the admission of containers wishing to share the host process ID namespace", + "suggestion":"" + } + `, + ` + { + "policyId":"180", + "orgId":"1", + "policyName":"CIS-5.2.4 Minimize the admission of containers wishing to share the host IPC namespace", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"A container running in the hosts IPC namespace can use IPC to interact with processes outside the container. There should be at least one admission control policy defined which does not permit containers to share the host IPC namespace. 
If you need to run containers which require hostIPC, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "scheduled_policy":false, + "scriptId":"180", + "variables":"", + "conditionName":"CIS-5.2.4 Minimize the admission of containers wishing to share the host IPC namespace", + "suggestion":"" + } + `, + ` + { + "policyId":"181", + "orgId":"1", + "policyName":"CIS-5.2.5 Minimize the admission of containers wishing to share the host network namespace", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"A container running in the hosts network namespace could access the local loopback device, and could access network traffic to and from other pods. There should be at least one admission control policy defined which does not permit containers to share the host network namespace. If you need to run containers which require access to the hosts network namespaces, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "scheduled_policy":false, + "scriptId":"181", + "variables":"", + "conditionName":"CIS-5.2.5 Minimize the admission of containers wishing to share the host network namespace", + "suggestion":"" + } + `, + ` + { + "policyId":"182", + "orgId":"1", + "policyName":"CIS-5.2.6 Minimize the admission of containers with allowPrivilegeEscalation", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"A container running with the allowPrivilegeEscalation flag set to true may have processes that can gain more privileges than their parent. There should be at least one admission control policy defined which does not permit containers to allow privilege escalation. 
The option exists ", + "scheduled_policy":false, + "scriptId":"182", + "variables":"", + "conditionName":"CIS-5.2.6 Minimize the admission of containers with allowPrivilegeEscalation", + "suggestion":"" + } + `, + ` + { + "policyId":"183", + "orgId":"1", + "policyName":"CIS-5.2.7 Minimize the admission of root containers", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Containers may run as any Linux user. Containers which run as the root user, whilst constrained by Container Runtime security features still have a escalated likelihood of container breakout. Ideally, all containers should run as a defined non-UID 0 user. There should be at least one admission control policy defined which does not permit root containers. If you need to run root containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "scheduled_policy":false, + "scriptId":"183", + "variables":"", + "conditionName":"CIS-5.2.7 Minimize the admission of root containers", + "suggestion":"" + } + `, + ` + { + "policyId":"184", + "orgId":"1", + "policyName":"CIS-5.2.8 Minimize the admission of containers with the NET_RAW capability", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Containers run with a default set of capabilities as assigned by the Container Runtime. By default this can include potentially dangerous capabilities. With Docker as the container runtime the NET_RAW capability is enabled which may be misused by malicious containers. Ideally, all containers should drop this capability. There should be at least one admission control policy defined which does not permit containers with the NET_RAW capability. 
If you need to run containers with this capability, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "scheduled_policy":false, + "scriptId":"184", + "variables":"", + "conditionName":"CIS-5.2.8 Minimize the admission of containers with the NET_RAW capability", + "suggestion":"" + } + `, + ` + { + "policyId":"185", + "orgId":"1", + "policyName":"CIS-5.2.9 Minimize the admission of containers with added capabilities", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks. There should be at least one policy defined which prevents containers with capabilities beyond the default set from launching. If you need to run containers with additional capabilities, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "scheduled_policy":false, + "scriptId":"185", + "variables":"", + "conditionName":"CIS-5.2.9 Minimize the admission of containers with added capabilities", + "suggestion":"" + } + `, + ` + { + "policyId":"186", + "orgId":"1", + "policyName":"CIS-5.2.10 Minimize the admission of containers with capabilities assigned", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user. 
In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege use of capabilities should be minimized.", + "scheduled_policy":false, + "scriptId":"186", + "variables":"", + "conditionName":"CIS-5.2.10 Minimize the admission of containers with capabilities assigned", + "suggestion":"" + } + `, + ` + { + "policyId":"187", + "orgId":"1", + "policyName":"CIS-5.2.11 Minimize the admission of Windows HostProcess Containers", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"A Windows container making use of the hostProcess flag can interact with the underlying Windows cluster node. As per the Kubernetes documentation, this provides \"privileged access\" to the Windows node.\n\n Where Windows containers are used inside a Kubernetes cluster, there should be at least one admission control policy which does not permit hostProcess Windows containers.\n\n If you need to run Windows containers which require hostProcess, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "scheduled_policy":false, + "scriptId":"187", + "variables":"", + "conditionName":"CIS-5.2.11 Minimize the admission of Windows HostProcess Containers", + "suggestion":"" + } + `, + ` + { + "policyId":"188", + "orgId":"1", + "policyName":"CIS-5.2.12 Minimize the admission of HostPath volumes", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"A container which mounts a hostPath volume as part of its specification will have access to the filesystem of the underlying cluster node. The use of hostPath volumes may allow containers access to privileged areas of the node filesystem. There should be at least one admission control policy defined which does not permit containers to mount hostPath volumes. 
If you need to run containers which require hostPath volumes, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "scheduled_policy":false, + "scriptId":"188", + "variables":"", + "conditionName":"CIS-5.2.12 Minimize the admission of HostPath volumes", + "suggestion":"" + } + `, + ` + { + "policyId":"189", + "orgId":"1", + "policyName":"CIS-5.2.13 Minimize the admission of containers which use HostPorts", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Host ports connect containers directly to the hosts network. This can bypass controls such as network policy. There should be at least one admission control policy defined which does not permit containers which require the use of HostPorts. If you need to run containers which require HostPorts, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "scheduled_policy":false, + "scriptId":"189", + "variables":"", + "conditionName":"CIS-5.2.13 Minimize the admission of containers which use HostPorts", + "suggestion":"" + } + `, + ` + { + "policyId":"190", + "orgId":"1", + "policyName":"CIS-5.3.1 Ensure that the CNI in use supports Network Policies", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Kubernetes network policies are enforced by the CNI plugin in use. 
As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", + "scheduled_policy":false, + "scriptId":"190", + "variables":"", + "conditionName":"CIS-5.3.1 Ensure that the CNI in use supports Network Policies", + "suggestion":"" + } + `, + ` + { + "policyId":"191", + "orgId":"1", + "policyName":"CIS-5.3.2 Ensure that all Namespaces have Network Policies defined", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints. Network Policies are namespace scoped. When a network policy is introduced to a given namespace, all traffic not allowed by the policy is denied. However, if there are no network policies in a namespace all traffic will be allowed into and out of the pods in that namespace.", + "scheduled_policy":false, + "scriptId":"191", + "variables":"", + "conditionName":"CIS-5.3.2 Ensure that all Namespaces have Network Policies defined", + "suggestion":"" + } + `, + ` + { + "policyId":"192", + "orgId":"1", + "policyName":"CIS-5.4.1 Prefer using secrets as files over secrets as environment variables", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"It is reasonably common for application code to log out its environment (particularly in the event of an error). 
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "scheduled_policy":false, + "scriptId":"192", + "variables":"", + "conditionName":"CIS-5.4.1 Prefer using secrets as files over secrets as environment variables", + "suggestion":"" + } + `, + ` + { + "policyId":"193", + "orgId":"1", + "policyName":"CIS-5.4.2 Consider external secret storage", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", + "scheduled_policy":false, + "scriptId":"193", + "variables":"", + "conditionName":"CIS-5.4.2 Consider external secret storage", + "suggestion":"" + } + `, + ` + { + "policyId":"194", + "orgId":"1", + "policyName":"CIS-5.7.1 Create administrative boundaries between resources using namespaces", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in Kubernetes cluster runs in a default namespace, called default. You can create additional namespaces and attach resources and users to them. 
You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", + "scheduled_policy":false, + "scriptId":"194", + "variables":"", + "conditionName":"CIS-5.7.1 Create administrative boundaries between resources using namespaces", + "suggestion":"" + } + `, + ` + { + "policyId":"195", + "orgId":"1", + "policyName":"CIS-5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Seccomp (secure computing mode) is used to restrict the set of system calls applications can make, allowing cluster administrators greater control over the security of workloads running in the cluster. Kubernetes disables seccomp profiles by default for historical reasons. You should enable it to ensure that the workloads have restricted actions available within the container.", + "scheduled_policy":false, + "scriptId":"195", + "variables":"", + "conditionName":"CIS-5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions", + "suggestion":"" + } + `, + ` + { + "policyId":"196", + "orgId":"1", + "policyName":"CIS-5.7.3 Apply Security Context to Your Pods and Containers", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. 
There are two levels of security context: pod level security context, and container level security context.", + "scheduled_policy":false, + "scriptId":"196", + "variables":"", + "conditionName":"CIS-5.7.3 Apply Security Context to Your Pods and Containers", + "suggestion":"" + } + `, + ` + { + "policyId":"197", + "orgId":"1", + "policyName":"CIS-5.7.4 The default namespace should not be used", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", + "scheduled_policy":false, + "scriptId":"197", + "variables":"", + "conditionName":"CIS-5.7.4 The default namespace should not be used", + "suggestion":"" + } + `, + ` + { + "policyId":"198", + "orgId":"1", + "policyName":"C-0002 - MITRE - Exec into container", + "category":"Cloud Security", + "stage":"deploy", + "description":"Attackers who have permissions, can run malicious commands in containers in the cluster using exec command. In this method, attackers can use legitimate images, such as an OS image as a backdoor container, and run their malicious code remotely by using kubectl exec.", + "scheduled_policy":false, + "scriptId":"198", + "variables":"", + "conditionName":"C-0002 - MITRE - Exec into container", + "suggestion":"" + } + `, + ` + { + "policyId":"199", + "orgId":"1", + "policyName":"C-0007 - MITRE - Data Destruction", + "category":"Cloud Security", + "stage":"deploy", + "description":"Attackers may attempt to destroy data and resources in the cluster. 
This includes deleting deployments, configurations, storage, and compute resources.", + "scheduled_policy":false, + "scriptId":"199", + "variables":"", + "conditionName":"C-0007 - MITRE - Data Destruction", + "suggestion":"" + } + `, + ` + { + "policyId":"200", + "orgId":"1", + "policyName":"C-0012 - MITRE - Applications credentials in configuration files", + "category":"Cloud Security", + "stage":"deploy", + "description":"Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developers endpoint, can steal the stored secrets and use them.", + "scheduled_policy":false, + "scriptId":"200", + "variables":"", + "conditionName":"C-0012 - MITRE - Applications credentials in configuration files", + "suggestion":"" + } + `, + ` + { + "policyId":"201", + "orgId":"1", + "policyName":"C-0014 - MITRE - Access Kubernetes dashboard", + "category":"Cloud Security", + "stage":"deploy", + "description":"The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. 
Consequently, attackers may retrieve information about the various resources in the cluster using the dashboards identity.", + "scheduled_policy":false, + "scriptId":"201", + "variables":"", + "conditionName":"C-0014 - MITRE - Access Kubernetes dashboard", + "suggestion":"" + } + `, + ` + { + "policyId":"202", + "orgId":"1", + "policyName":"C-0015 - MITRE - List Kubernetes secrets", + "category":"Cloud Security", + "stage":"deploy", + "description":"Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server can access sensitive information that might include credentials to various services.", + "scheduled_policy":false, + "scriptId":"202", + "variables":"", + "conditionName":"C-0015 - MITRE - List Kubernetes secrets", + "suggestion":"" + } + `, + ` + { + "policyId":"203", + "orgId":"1", + "policyName":"C-0020 - MITRE - Mount service principal", + "category":"Cloud Security", + "stage":"deploy", + "description":"When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. For example, in AKS each node contains service principal credential.", + "scheduled_policy":false, + "scriptId":"203", + "variables":"", + "conditionName":"C-0020 - MITRE - Mount service principal", + "suggestion":"" + } + `, + ` + { + "policyId":"204", + "orgId":"1", + "policyName":"C-0021 - MITRE - Exposed sensitive interfaces", + "category":"Cloud Security", + "stage":"deploy", + "description":"Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore dont require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. 
Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard. Note, this control is configurable. See below the details.", + "scheduled_policy":false, + "scriptId":"204", + "variables":"", + "conditionName":"C-0021 - MITRE - Exposed sensitive interfaces", + "suggestion":"" + } + `, + ` + { + "policyId":"205", + "orgId":"1", + "policyName":"C-0026 - MITRE - Kubernetes CronJob", + "category":"Cloud Security", + "stage":"deploy", + "description":"Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", + "scheduled_policy":false, + "scriptId":"205", + "variables":"", + "conditionName":"C-0026 - MITRE - Kubernetes CronJob", + "suggestion":"" + } + `, + ` + { + "policyId":"206", + "orgId":"1", + "policyName":"C-0031 - MITRE - Delete Kubernetes events ", + "category":"Cloud Security", + "stage":"deploy", + "description":"Kubernetes events can be very useful for identifying changes that occur in the cluster. Therefore, attackers may want to delete these events by using kubectl delete events --all in an attempt to avoid detection of their activity in the cluster.", + "scheduled_policy":false, + "scriptId":"206", + "variables":"", + "conditionName":"C-0031 - MITRE - Delete Kubernetes events ", + "suggestion":"" + } + `, + ` + { + "policyId":"207", + "orgId":"1", + "policyName":"C-0035 - MITRE - Cluster-admin binding ", + "category":"Cloud Security", + "stage":"deploy", + "description":"Role-based access control is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. 
Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "scheduled_policy":false, + "scriptId":"207", + "variables":"", + "conditionName":"C-0035 - MITRE - Cluster-admin binding ", + "suggestion":"" + } + `, + ` + { + "policyId":"208", + "orgId":"1", + "policyName":"C-0036 - MITRE - Validate Validating admission controller ", + "category":"Cloud Security", + "stage":"deploy", + "description":"Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", + "scheduled_policy":false, + "scriptId":"208", + "variables":"", + "conditionName":"C-0036 - MITRE - Validate Validating admission controller ", + "suggestion":"" + } + `, + ` + { + "policyId":"209", + "orgId":"1", + "policyName":"C-0037 - MITRE - CoreDNS poisoning ", + "category":"Cloud Security", + "stage":"deploy", + "description":"CoreDNS is a modular Domain Name System (DNS) server written in Go, hosted by Cloud Native Computing Foundation (CNCF). CoreDNS is the main DNS service that is being used in Kubernetes. The configuration of CoreDNS can be modified by a file named corefile. In Kubernetes, this file is stored in a ConfigMap object, located at the kube-system namespace. 
If attackers have permissions to modify the ConfigMap, for example by using the container\u2019s service account, they can change the behavior of the cluster\u2019s DNS, poison it, and take the network identity of other services.", + "scheduled_policy":false, + "scriptId":"209", + "variables":"", + "conditionName":"C-0037 - MITRE - CoreDNS poisoning ", + "suggestion":"" + } + `, + ` + { + "policyId":"210", + "orgId":"1", + "policyName":"C-0039 - MITRE - Validate Mutating admission controller ", + "category":"Cloud Security", + "stage":"deploy", + "description":"Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", + "scheduled_policy":false, + "scriptId":"210", + "variables":"", + "conditionName":"C-0039 - MITRE - Validate Mutating admission controller ", + "suggestion":"" + } + `, + ` + { + "policyId":"211", + "orgId":"1", + "policyName":"C-0042 - MITRE - SSH server running inside container ", + "category":"Cloud Security", + "stage":"deploy", + "description":"SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods such as phishing, they can use it to get remote access to the container by SSH.", + "scheduled_policy":false, + "scriptId":"211", + "variables":"", + "conditionName":"C-0042 - MITRE - SSH server running inside container ", + "suggestion":"" + } + `, + ` + { + "policyId":"212", + "orgId":"1", + "policyName":"C-0045 - MITRE - Writable hostPath mount ", + "category":"Cloud Security", + "stage":"deploy", + "description":"hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
For example, the latter can be achieved by creating a cron job on the host.", + "scheduled_policy":false, + "scriptId":"212", + "variables":"", + "conditionName":"C-0045 - MITRE - Writable hostPath mount ", + "suggestion":"" + } + `, + ` + { + "policyId":"213", + "orgId":"1", + "policyName":"C-0048 - MITRE - HostPath mount ", + "category":"Cloud Security", + "stage":"deploy", + "description":"Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", + "scheduled_policy":false, + "scriptId":"213", + "variables":"", + "conditionName":"C-0048 - MITRE - HostPath mount ", + "suggestion":"" + } + `, + ` + { + "policyId":"214", + "orgId":"1", + "policyName":"C-0052 - MITRE - Instance Metadata API ", + "category":"Cloud Security", + "stage":"deploy", + "description":"Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node.", + "scheduled_policy":false, + "scriptId":"214", + "variables":"", + "conditionName":"C-0052 - MITRE - Instance Metadata API ", + "suggestion":"" + } + `, + ` + { + "policyId":"215", + "orgId":"1", + "policyName":"C-0053 - MITRE - Access container service account ", + "category":"Cloud Security", + "stage":"deploy", + "description":"Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. 
Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", + "scheduled_policy":false, + "scriptId":"215", + "variables":"", + "conditionName":"C-0053 - MITRE - Access container service account ", + "suggestion":"" + } + `, + ` + { + "policyId":"216", + "orgId":"1", + "policyName":"C-0054 - MITRE - Cluster internal networking ", + "category":"Cloud Security", + "stage":"deploy", + "description":"Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "scheduled_policy":false, + "scriptId":"216", + "variables":"", + "conditionName":"C-0054 - MITRE - Cluster internal networking ", + "suggestion":"" + } + `, + ` + { + "policyId":"217", + "orgId":"1", + "policyName":"C-0057 - MITRE - Privileged container ", + "category":"Cloud Security", + "stage":"deploy", + "description":"A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. 
Attackers who gain access to a privileged container or have permissions to create a new privileged container ", + "scheduled_policy":false, + "scriptId":"217", + "variables":"", + "conditionName":"C-0057 - MITRE - Privileged container ", + "suggestion":"" + } + `, + ` + { + "policyId":"218", + "orgId":"1", + "policyName":"C-0058 - MITRE - CVE-2021-25741 - Using symlink for arbitrary host file system access ", + "category":"Cloud Security", + "stage":"deploy", + "description":"A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster.", + "scheduled_policy":false, + "scriptId":"218", + "variables":"", + "conditionName":"C-0058 - MITRE - CVE-2021-25741 - Using symlink for arbitrary host file system access", + "suggestion":"" + } + `, + ` + { + "policyId":"219", + "orgId":"1", + "policyName":"C-0059 - MITRE - CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability ", + "category":"Cloud Security", + "stage":"deploy", + "description":"A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. 
This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster.", + "scheduled_policy":false, + "scriptId":"219", + "variables":"", + "conditionName":"C-0059 - MITRE - CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "suggestion":"" + } + `, + ` + { + "policyId":"220", + "orgId":"1", + "policyName":"C-0066 - MITRE - Secret/etcd encryption enabled ", + "category":"Cloud Security", + "stage":"deploy", + "description":"etcd is a consistent and highly-available key value store used as Kubernetes backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "scheduled_policy":false, + "scriptId":"220", + "variables":"", + "conditionName":"C-0066 - MITRE - Secret/etcd encryption enabled", + "suggestion":"" + } + `, + ` + { + "policyId":"221", + "orgId":"1", + "policyName":"C-0067 - MITRE - Audit logs enabled ", + "category":"Cloud Security", + "stage":"deploy", + "description":"Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes.", + "scheduled_policy":false, + "scriptId":"221", + "variables":"", + "conditionName":"C-0067 - MITRE - Audit logs enabled", + "suggestion":"" + } + `, + ` + { + "policyId":"222", + "orgId":"1", + "policyName":"C-0068 - MITRE - PSP enabled ", + "category":"Cloud Security", + "stage":"deploy", + "description":"Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
It is important to use PSP to control the creation of sensitive pods in your cluster.", + "scheduled_policy":false, + "scriptId":"222", + "variables":"", + "conditionName":"C-0068 - MITRE - PSP enabled", + "suggestion":"" + } + `, + ` + { + "policyId":"223", + "orgId":"1", + "policyName":"C-0069 - MITRE - Disable anonymous access to Kubelet service ", + "category":"Cloud Security", + "stage":"deploy", + "description":"By default, requests to the kubelets HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "scheduled_policy":false, + "scriptId":"223", + "variables":"", + "conditionName":"C-0069 - MITRE - Disable anonymous access to Kubelet service", + "suggestion":"" + } + `, + ` + { + "policyId":"224", + "orgId":"1", + "policyName":"C-0070 - MITRE - Enforce Kubelet client TLS authentication ", + "category":"Cloud Security", + "stage":"deploy", + "description":"Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "scheduled_policy":false, + "scriptId":"224", + "variables":"", + "conditionName":"C-0070 - MITRE - Enforce Kubelet client TLS authentication", + "suggestion":"" + } + `, + ` + { + "policyId":"225", + "orgId":"1", + "policyName":"C-0002 - NSA - Exec into container", + "category":"Cloud Security", + "stage":"deploy", + "description":"Attackers who have permissions, can run malicious commands in containers in the cluster using exec command. 
In this method, attackers can use legitimate images, such as an OS image as a backdoor container, and run their malicious code remotely by using kubectl exec.", + "scheduled_policy":false, + "scriptId":"225", + "variables":"", + "conditionName":"C-0002 - NSA - Exec into container", + "suggestion":"" + } + `, + ` + { + "policyId":"226", + "orgId":"1", + "policyName":"C-0005 - NSA - API server insecure port is enabled", + "category":"Cloud Security", + "stage":"deploy", + "description":"The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "scheduled_policy":false, + "scriptId":"226", + "variables":"", + "conditionName":"C-0005 - NSA - API server insecure port is enabled", + "suggestion":"" + } + `, + ` + { + "policyId":"227", + "orgId":"1", + "policyName":"C-0009 - NSA - Resource limits", + "category":"Cloud Security", + "stage":"deploy", + "description":"CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. 
It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "scheduled_policy":false, + "scriptId":"227", + "variables":"", + "conditionName":"C-0009 - NSA - Resource limits", + "suggestion":"" + } + `, + ` + { + "policyId":"228", + "orgId":"1", + "policyName":"C-0012 - NSA - Applications credentials in configuration files", + "category":"Cloud Security", + "stage":"deploy", + "description":"Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developers endpoint, can steal the stored secrets and use them.", + "scheduled_policy":false, + "scriptId":"228", + "variables":"", + "conditionName":"C-0012 - NSA - Applications credentials in configuration files", + "suggestion":"" + } + `, + ` + { + "policyId":"229", + "orgId":"1", + "policyName":"C-0013 - NSA - Non-root containers", + "category":"Cloud Security", + "stage":"deploy", + "description":"Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. . Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. 
Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "scheduled_policy":false, + "scriptId":"229", + "variables":"", + "conditionName":"C-0013 - NSA - Non-root containers", + "suggestion":"" + } + `, + ` + { + "policyId":"230", + "orgId":"1", + "policyName":"C-0016 - NSA - Allow privilege escalation", + "category":"Cloud Security", + "stage":"deploy", + "description":"Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "scheduled_policy":false, + "scriptId":"230", + "variables":"", + "conditionName":"C-0016 - NSA - Allow privilege escalation", + "suggestion":"" + } + `, + ` + { + "policyId":"231", + "orgId":"1", + "policyName":"C-0017 - NSA - Immutable container filesystem", + "category":"Cloud Security", + "stage":"deploy", + "description":"By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container.", + "scheduled_policy":false, + "scriptId":"231", + "variables":"", + "conditionName":"C-0017 - NSA - Immutable container filesystem", + "suggestion":"" + } + `, + ` + { + "policyId":"232", + "orgId":"1", + "policyName":"C-0030 - NSA - Ingress and Egress blocked", + "category":"Cloud Security", + "stage":"deploy", + "description":"Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pods namespace. 
Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", + "scheduled_policy":false, + "scriptId":"232", + "variables":"", + "conditionName":"C-0030 - NSA - Ingress and Egress blocked", + "suggestion":"" + } + `, + ` + { + "policyId":"233", + "orgId":"1", + "policyName":"C-0034 - NSA - Automatic mapping of service account", + "category":"Cloud Security", + "stage":"deploy", + "description":"Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account dont disable automount.", + "scheduled_policy":false, + "scriptId":"233", + "variables":"", + "conditionName":"C-0034 - NSA - Automatic mapping of service account", + "suggestion":"" + } + `, + ` + { + "policyId":"234", + "orgId":"1", + "policyName":"C-0035 - NSA - Cluster-admin binding", + "category":"Cloud Security", + "stage":"deploy", + "description":"Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "scheduled_policy":false, + "scriptId":"234", + "variables":"", + "conditionName":"C-0035 - NSA - Cluster-admin binding", + "suggestion":"" + } + `, + ` + { + "policyId":"235", + "orgId":"1", + "policyName":"C-0038 - NSA - Host PID/IPC privileges", + "category":"Cloud Security", + "stage":"deploy", + "description":"Containers should be isolated from the host machine as much as possible. 
The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "scheduled_policy":false, + "scriptId":"235", + "variables":"", + "conditionName":"C-0038 - NSA - Host PID/IPC privileges", + "suggestion":"" + } + `, + ` + { + "policyId":"236", + "orgId":"1", + "policyName":"C-0041 - NSA - HostNetwork access", + "category":"Cloud Security", + "stage":"deploy", + "description":"Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "scheduled_policy":false, + "scriptId":"236", + "variables":"", + "conditionName":"C-0041 - NSA - HostNetwork access", + "suggestion":"" + } + `, + ` + { + "policyId":"237", + "orgId":"1", + "policyName":"C-0044 - NSA - Container hostPort", + "category":"Cloud Security", + "stage":"deploy", + "description":"Workloads that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. 
The same thing happens when you restart your pods — Kubernetes reschedules them to a different node if available.", + "scheduled_policy":false, + "scriptId":"237", + "variables":"", + "conditionName":"C-0044 - NSA - Container hostPort", + "suggestion":"" + } + `, + ` + { + "policyId":"238", + "orgId":"1", + "policyName":"C-0046 - NSA - Insecure capabilities", + "category":"Cloud Security", + "stage":"deploy", + "description":"Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "scheduled_policy":false, + "scriptId":"238", + "variables":"", + "conditionName":"C-0046 - NSA - Insecure capabilities", + "suggestion":"" + } + `, + ` + { + "policyId":"239", + "orgId":"1", + "policyName":"C-0054 - NSA - Cluster internal networking", + "category":"Cloud Security", + "stage":"deploy", + "description":"Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "scheduled_policy":false, + "scriptId":"239", + "variables":"", + "conditionName":"C-0054 - NSA - Cluster internal networking", + "suggestion":"" + } + `, + ` + { + "policyId":"240", + "orgId":"1", + "policyName":"C-0055 - NSA - Linux hardening", + "category":"Cloud Security", + "stage":"deploy", + "description":"In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux, AppArmor, and seccomp. 
Starting from Kubernetes version 22, SELinux is enabled by default.", + "scheduled_policy":false, + "scriptId":"240", + "variables":"", + "conditionName":"C-0055 - NSA - Linux hardening", + "suggestion":"" + } + `, + ` + { + "policyId":"241", + "orgId":"1", + "policyName":"C-0057 - NSA - Privileged container", + "category":"Cloud Security", + "stage":"deploy", + "description":"A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container ", + "scheduled_policy":false, + "scriptId":"241", + "variables":"", + "conditionName":"C-0057 - NSA - Privileged container", + "suggestion":"" + } + `, + ` + { + "policyId":"242", + "orgId":"1", + "policyName":"C-0058 - NSA - CVE-2021-25741 - Using symlink for arbitrary host file system access", + "category":"Cloud Security", + "stage":"deploy", + "description":"A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. 
This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster.", + "scheduled_policy":false, + "scriptId":"242", + "variables":"", + "conditionName":"C-0058 - NSA - CVE-2021-25741 - Using symlink for arbitrary host file system access", + "suggestion":"" + } + `, + ` + { + "policyId":"243", + "orgId":"1", + "policyName":"C-0059 - NSA - CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "category":"Cloud Security", + "stage":"deploy", + "description":"Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster.", + "scheduled_policy":false, + "scriptId":"243", + "variables":"", + "conditionName":"C-0059 - NSA - CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "suggestion":"" + } + `, + ` + { + "policyId":"244", + "orgId":"1", + "policyName":"C-0066 - NSA - Secret/etcd encryption enabled", + "category":"Cloud Security", + "stage":"deploy", + "description":"etcd is a consistent and highly-available key value store used as Kubernetes backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "scheduled_policy":false, + "scriptId":"244", + "variables":"", + "conditionName":"C-0066 - NSA - Secret/etcd encryption enabled", + "suggestion":"" + } + `, + ` + { + "policyId":"245", + "orgId":"1", + "policyName":"C-0067 - NSA - Audit logs enabled", + "category":"Cloud Security", + "stage":"deploy", + "description":"Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes.", + "scheduled_policy":false, + "scriptId":"245", + "variables":"", + "conditionName":"C-0067 - NSA - Audit logs enabled", + "suggestion":"" + } + `, + ` + { + "policyId":"246", + "orgId":"1", + "policyName":"C-0068 - NSA - PSP enabled ", + "category":"Cloud Security", + "stage":"deploy", + "description":"Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. It is an important to use PSP to control the creation of sensitive pods in your cluster.", + "scheduled_policy":false, + "scriptId":"246", + "variables":"", + "conditionName":"C-0068 - NSA - PSP enabled ", + "suggestion":"" + } + `, + ` + { + "policyId":"247", + "orgId":"1", + "policyName":"C-0069 - NSA - Disable anonymous access to Kubelet service ", + "category":"Cloud Security", + "stage":"deploy", + "description":"By default, requests to the kubelets HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "scheduled_policy":false, + "scriptId":"247", + "variables":"", + "conditionName":"C-0069 - NSA - Disable anonymous access to Kubelet service ", + "suggestion":"" + } + `, + ` + { + "policyId":"248", + "orgId":"1", + "policyName":"C-0070 - NSA - Enforce Kubelet client TLS authentication ", + "category":"Cloud Security", + "stage":"deploy", + "description":"Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. 
This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "scheduled_policy":false, + "scriptId":"248", + "variables":"", + "conditionName":"C-0070 - NSA - Enforce Kubelet client TLS authentication ", + "suggestion":"" + } + `, + ` + { + "policyId":"249", + "orgId":"1", + "policyName":"CIS - Compliance Score - Range: 70-85 ", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Overall CIS Complaince Score found between 70-85.", + "scheduled_policy":false, + "scriptId":"249", + "variables":"", + "conditionName":"CIS - Compliance Score - Range: 70-85", + "suggestion":"" + } + `, + ` + { + "policyId":"250", + "orgId":"1", + "policyName":"CIS - Compliance Score - Range: 50-70 ", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Overall CIS Complaince Score found between 50-70.", + "scheduled_policy":false, + "scriptId":"250", + "variables":"", + "conditionName":"CIS - Compliance Score - Range: 50-70", + "suggestion":"" + } + `, + ` + { + "policyId":"251", + "orgId":"1", + "policyName":"CIS - Compliance Score - Range: 30-50 ", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Overall CIS Complaince Score found between 30-50.", + "scheduled_policy":false, + "scriptId":"251", + "variables":"", + "conditionName":"CIS - Compliance Score - Range: 30-50", + "suggestion":"" + } + `, + ` + { + "policyId":"252", + "orgId":"1", + "policyName":"CIS - Compliance Score - Range: 0-30 ", + "category":"CIS-Benchmark", + "stage":"deploy", + "description":"Overall CIS Complaince Score found below 30.", + "scheduled_policy":false, + "scriptId":"252", + "variables":"", + "conditionName":"CIS - Compliance Score - Range: 0-30", + "suggestion":"" + } + `, + ` + { + "policyId":"253", + "orgId":"1", + "policyName":"MITRE - Compliance Score - Range: 70-85 ", + "category":"Compliance", + "stage":"deploy", + "description":"Overall MITRE Complaince Score found between 70-85.", + 
"scheduled_policy":false, + "scriptId":"253", + "variables":"", + "conditionName":"MITRE - Compliance Score - Range: 70-85", + "suggestion":"" + } + `, + ` + { + "policyId":"254", + "orgId":"1", + "policyName":"MITRE - Compliance Score - Range: 50-70 ", + "category":"Compliance", + "stage":"deploy", + "description":"Overall MITRE Complaince Score found between 50-70.", + "scheduled_policy":false, + "scriptId":"254", + "variables":"", + "conditionName":"MITRE - Compliance Score - Range: 50-70", + "suggestion":"" + } + `, + ` + { + "policyId":"255", + "orgId":"1", + "policyName":"MITRE - Compliance Score - Range: 30-50 ", + "category":"Compliance", + "stage":"deploy", + "description":"Overall MITRE Complaince Score found between 30-50.", + "scheduled_policy":false, + "scriptId":"255", + "variables":"", + "conditionName":"MITRE - Compliance Score - Range: 30-50", + "suggestion":"" + } + `, + ` + { + "policyId":"256", + "orgId":"1", + "policyName":"MITRE - Compliance Score - Range: 0-30 ", + "category":"Compliance", + "stage":"deploy", + "description":"Overall MITRE Complaince Score found below 30.", + "scheduled_policy":false, + "scriptId":"256", + "variables":"", + "conditionName":"MITRE - Compliance Score - Range: 0-30", + "suggestion":"" + } + `, + ` + { + "policyId":"257", + "orgId":"1", + "policyName":"NSA - Compliance Score - Range: 70-85 ", + "category":"Compliance", + "stage":"deploy", + "description":"Overall NSA Complaince Score found between 70-85.", + "scheduled_policy":false, + "scriptId":"257", + "variables":"", + "conditionName":"NSA - Compliance Score - Range: 70-85", + "suggestion":"" + } + `, + ` + { + "policyId":"258", + "orgId":"1", + "policyName":"NSA - Compliance Score - Range: 50-70 ", + "category":"Compliance", + "stage":"deploy", + "description":"Overall NSA Complaince Score found between 50-70.", + "scheduled_policy":false, + "scriptId":"258", + "variables":"", + "conditionName":"NSA - Compliance Score - Range: 50-70", + "suggestion":"" + } + 
`, + ` + { + "policyId":"259", + "orgId":"1", + "policyName":"NSA - Compliance Score - Range: 30-50 ", + "category":"Compliance", + "stage":"deploy", + "description":"Overall NSA Complaince Score found between 30-50.", + "scheduled_policy":false, + "scriptId":"259", + "variables":"", + "conditionName":"NSA - Compliance Score - Range: 30-50", + "suggestion":"" + } + `, + ` + { + "policyId":"260", + "orgId":"1", + "policyName":"NSA - Compliance Score - Range: 0-30 ", + "category":"Compliance", + "stage":"deploy", + "description":"Overall NSA Complaince Score found below 30.", + "scheduled_policy":false, + "scriptId":"260", + "variables":"", + "conditionName":"NSA - Compliance Score - Range: 0-30", + "suggestion":"" + } + `, + ` + { + "policyId":"261", + "orgId":"1", + "policyName":"Auto-merge should be disabled", + "category":"Git Security Posture", + "stage":"source", + "description":"Auto-merge should not be allowed in code repository.", + "scheduled_policy":false, + "scriptId":"261", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"262", + "orgId":"1", + "policyName":"Deploy to Production should be preceeded by Judgements Spinnaker", + "category":"Deployment Config", + "stage":"deploy", + "description":"Deployments to sensitive environments should have a manual review and judgement stage in pipeline requiring someone to approve deployment.", + "scheduled_policy":false, + "scriptId":"262", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"263", + "orgId":"1", + "policyName":"Open to merge public repositories for code utilities", + "category":"Git Security Posture", + "stage":"source", + "description":"Dependencies in code should be secure and protected from unauthorized code changes.", + "scheduled_policy":false, + "scriptId":"263", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"264", + "orgId":"1", + "policyName":"Approved user for build 
trigger", + "category":"Build Security Posture", + "stage":"build", + "description":"Only approved users should be allowed to trigger builds.", + "scheduled_policy":false, + "scriptId":"264", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"265", + "orgId":"1", + "policyName":"Refrain from running pipelines originating from forked repos", + "category":"Git Security Posture", + "stage":"source", + "description":"Forks of original repositories should not be able to trigger pipelines.", + "scheduled_policy":false, + "scriptId":"265", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"266", + "orgId":"1", + "policyName":"Bot user cannot merge the code", + "category":"Git Security Posture", + "stage":"source", + "description":"Bot users must not be capable of merging any pull requests.", + "scheduled_policy":false, + "scriptId":"266", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"267", + "orgId":"1", + "policyName":"Admin access privilege should be with less than 5 percent users", + "category":"Git Security Posture", + "stage":"source", + "description":"Only 5% of overall set of users must have admin access over code repository.", + "scheduled_policy":false, + "scriptId":"267", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"268", + "orgId":"1", + "policyName":"Inactive users Access restriction policy", + "category":"Git Security Posture", + "stage":"source", + "description":"Users who have been inactive for more than 3 months must not have access to code repository.", + "scheduled_policy":false, + "scriptId":"268", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"269", + "orgId":"1", + "policyName":"Prohibited use of unspecified package versions", + "category":"Git Security Posture", + "stage":"source", + "description":"Unspecified Package versions can results in 
fetching uncertified latest package versions. It should be mandatory to pull only specific version except for latest as artifacts and dependencies.", + "scheduled_policy":false, + "scriptId":"269", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"270", + "orgId":"1", + "policyName":"Centralized package manager settings", + "category":"Git Security Posture", + "stage":"source", + "description":"Centralized package manager imposes additional checks on having only secure packages. Thus, having central package managers for code dependencies is important.", + "scheduled_policy":false, + "scriptId":"270", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"271", + "orgId":"1", + "policyName":"Artifacts should be signed", + "category":"Artifact Integrity", + "stage":"artifact", + "description":"Only signed artifact must be allowed for deployment.", + "scheduled_policy":false, + "scriptId":"271", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"272", + "orgId":"1", + "policyName":"Untrusted Deployment via Configuration Drift", + "category":"Deployment Config", + "stage":"deploy", + "description":"Pipeline configuration should be fetched only from trusted sources.", + "scheduled_policy":false, + "scriptId":"272", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"273", + "orgId":"1", + "policyName":"Continuously check for known vulnerabilities", + "category":"Vulnerability Analysis", + "stage":"artifact", + "description":"Continuous check for known vulnerabilities must be enabled in SSD.", + "scheduled_policy":false, + "scriptId":"273", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"274", + "orgId":"1", + "policyName":"High severity secret detection in code repository", + "category":"Secret Scan", + "stage":"source", + "description":"High Severity secrets must not be exposed in 
code repository.", + "scheduled_policy":false, + "scriptId":"274", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"275", + "orgId":"1", + "policyName":"Critical severity secret detection in code repository", + "category":"Secret Scan", + "stage":"source", + "description":"Critical Severity secrets must not be exposed in code repository.", + "scheduled_policy":false, + "scriptId":"275", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"276", + "orgId":"1", + "policyName":"Medium severity secret detection in code repository", + "category":"Secret Scan", + "stage":"source", + "description":"Medium Severity secrets must not be exposed in code repository.", + "scheduled_policy":false, + "scriptId":"276", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"277", + "orgId":"1", + "policyName":"Low severity secret detection in code repository", + "category":"Secret Scan", + "stage":"source", + "description":"Low Severity secrets must not be exposed in code repository.", + "scheduled_policy":false, + "scriptId":"277", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"278", + "orgId":"1", + "policyName":"High severity secret detection in containers", + "category":"Secret Scan", + "stage":"deploy", + "description":"High Severity secrets must not be exposed in containers.", + "scheduled_policy":false, + "scriptId":"278", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"279", + "orgId":"1", + "policyName":"Critical severity secret detection in containers", + "category":"Secret Scan", + "stage":"deploy", + "description":"Critical Severity secrets must not be exposed in containers.", + "scheduled_policy":false, + "scriptId":"279", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"280", + "orgId":"1", + "policyName":"Medium severity secret detection 
in containers", + "category":"Secret Scan", + "stage":"deploy", + "description":"Medium Severity secrets must not be exposed in containers.", + "scheduled_policy":false, + "scriptId":"280", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"281", + "orgId":"1", + "policyName":"Low severity secret detection in containers", + "category":"Secret Scan", + "stage":"deploy", + "description":"Low Severity secrets must not be exposed in containers.", + "scheduled_policy":false, + "scriptId":"281", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"282", + "orgId":"1", + "policyName":"High severity secret detection in helm", + "category":"Secret Scan", + "stage":"deploy", + "description":"High Severity secrets must not be exposed in helm.", + "scheduled_policy":false, + "scriptId":"282", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"283", + "orgId":"1", + "policyName":"Critical severity secret detection in helm", + "category":"Secret Scan", + "stage":"deploy", + "description":"Critical Severity secrets must not be exposed in helm.", + "scheduled_policy":false, + "scriptId":"283", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"284", + "orgId":"1", + "policyName":"Medium severity secret detection in helm", + "category":"Secret Scan", + "stage":"deploy", + "description":"Medium Severity secrets must not be exposed in helm.", + "scheduled_policy":false, + "scriptId":"284", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"285", + "orgId":"1", + "policyName":"Low severity secret detection in helm", + "category":"Secret Scan", + "stage":"deploy", + "description":"Low Severity secrets must not be exposed in helm.", + "scheduled_policy":false, + "scriptId":"285", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"286", + "orgId":"1", + 
"policyName":"Gitlab Repository Access Control Policy", + "category":"Git Security Posture", + "stage":"source", + "description":"Code Repository should not be publicly visible or modifiable.", + "scheduled_policy":false, + "scriptId":"286", + "variables":"", + "conditionName":"Repository Access Control Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"287", + "orgId":"1", + "policyName":"Gitlab Minimum Reviewers Policy", + "category":"Git Security Posture", + "stage":"source", + "description":"Pushed code should be reviewed by a minimum number of users as defined in the policy.", + "scheduled_policy":false, + "scriptId":"287", + "variables":"", + "conditionName":"Minimum Reviewers Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"288", + "orgId":"1", + "policyName":"Gitlab Branch Protection Policy", + "category":"Git Security Posture", + "stage":"source", + "description":"Repositories should have branch protection enabled requiring all code changes to be reviewed. This means disabling Push events and requiring Pull/Merge Requests to have code reviews.", + "scheduled_policy":false, + "scriptId":"288", + "variables":"", + "conditionName":"Branch Protection Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"289", + "orgId":"1", + "policyName":"Gitlab Bot User should not be a Repo Admin", + "category":"Git Security Posture", + "stage":"source", + "description":"Bot User should not be a Repo Admin.", + "scheduled_policy":false, + "scriptId":"289", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"290", + "orgId":"1", + "policyName":"Gitlab SECURITY.md file should be present", + "category":"Git Security Posture", + "stage":"source", + "description":"SECURITY.md file should be present in code repository.", + "scheduled_policy":false, + "scriptId":"290", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"291", + "orgId":"1", + "policyName":"Gitlab Repository 2FA Policy", + 
"category":"Git Security Posture", + "stage":"source", + "description":"Repositories should be protected based on 2FA authentication", + "scheduled_policy":false, + "scriptId":"291", + "variables":"", + "conditionName":"Repository 2FA Policy", + "suggestion":"" + } + `, + ` + { + "policyId":"292", + "orgId":"1", + "policyName":"Gitlab Build Webhook SSL/TLS Policy", + "category":"Git Security Posture", + "stage":"build", + "description":"Webhooks should use SSL/TLS.", + "scheduled_policy":false, + "scriptId":"292", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"293", + "orgId":"1", + "policyName":"Deploy to Production should be preceeded by Judgements Argo", + "category":"Deployment Config", + "stage":"deploy", + "description":"Deployments to sensitive environments should have a manual review and judgement stage in pipeline requiring someone to approve deployment.", + "scheduled_policy":false, + "scriptId":"293", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"294", + "orgId":"1", + "policyName":"Deploy to Production should be preceeded by Judgements Jenkins", + "category":"Deployment Config", + "stage":"deploy", + "description":"Deployments to sensitive environments should have a manual review and judgement stage in pipeline requiring someone to approve deployment.", + "scheduled_policy":false, + "scriptId":"294", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"295", + "orgId":"1", + "policyName":"BitBucket Repository Access Control Policy", + "category":"Git Security Posture", + "stage":"source", + "description":"Code Repository should not be publicly visible or modifiable.", + "scheduled_policy":false, + "scriptId":"295", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"296", + "orgId":"1", + "policyName":"BitBucket Minimum Reviewers Policy", + "category":"Git Security Posture", + "stage":"source", + 
"description":"Pushed code should be reviewed by a minimum number of users:2 as defined in the policy.", + "scheduled_policy":false, + "scriptId":"296", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"297", + "orgId":"1", + "policyName":"BitBucket Branch Protection Policy", + "category":"Git Security Posture", + "stage":"source", + "description":"Repositories should have branch protection enabled requiring all code changes to be reviewed. This means disabling Push events and requiring Pull/Merge Requests to have code reviews.", + "scheduled_policy":false, + "scriptId":"297", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"298", + "orgId":"1", + "policyName":"BitBucket Branch Deletion Prevention Policy", + "category":"Git Security Posture", + "stage":"source", + "description":"While the default branch can’t be deleted directly even if the setting is on, in general, it is best practice to prevent branches from being deleted by anyone with write access.", + "scheduled_policy":false, + "scriptId":"298", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"299", + "orgId":"1", + "policyName":"BitBucket Bot user cannot merge the code", + "category":"Git Security Posture", + "stage":"source", + "description":"Bot users must not be capable of merging any pull requests.", + "scheduled_policy":false, + "scriptId":"299", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"300", + "orgId":"1", + "policyName":"BitBucket Bot User should not be a Repo Admin", + "category":"Git Security Posture", + "stage":"source", + "description":"Bot User should not be a Repo Admin.", + "scheduled_policy":false, + "scriptId":"300", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"301", + "orgId":"1", + "policyName":"BitBucket Bot User should not be an Org Owner", + "category":"Git Security Posture", + 
"stage":"source", + "description":"Bot User should not be an Org Owner.", + "scheduled_policy":false, + "scriptId":"301", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"302", + "orgId":"1", + "policyName":"BitBucket Auto-merge should be disabled", + "category":"Git Security Posture", + "stage":"source", + "description":"Auto-merge should not be allowed in code repository.", + "scheduled_policy":false, + "scriptId":"302", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"303", + "orgId":"1", + "policyName":"BitBucket Single Owner of Organization", + "category":"Git Security Posture", + "stage":"source", + "description":"To reduce the attack surface it is recommended to have more than 1 admin of an organization or workspace.", + "scheduled_policy":false, + "scriptId":"303", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"304", + "orgId":"1", + "policyName":"BitBucket Admin access privilege should be with less than 5 percent users", + "category":"Git Security Posture", + "stage":"source", + "description":"Only 5% of overall set of users must have admin access over code repository.", + "scheduled_policy":false, + "scriptId":"304", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"305", + "orgId":"1", + "policyName":"BitBucket Webhook Usage Policy", + "category":"Git Security Posture", + "stage":"source", + "description":"Webhook provide secure way of consuming events from source repository. 
Thus, webhooks must be used for integration with other platforms.", + "scheduled_policy":false, + "scriptId":"305", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"306", + "orgId":"1", + "policyName":"BitBucket Webhook SSL/TLS Protection Policy", + "category":"Git Security Posture", + "stage":"source", + "description":"Webhooks should use SSL/TLS.", + "scheduled_policy":false, + "scriptId":"306", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"307", + "orgId":"1", + "policyName":"Snyk Code Scan - High Severity Findings Policy", + "category":"SAST", + "stage":"source", + "description":"This policy is designed to ensure timely identification, assessment, and resolution of high-severity findings in Snyk Code Scan analysis. It outlines the procedures and responsibilities for addressing issues that could pose significant risks to code quality and security.", + "scheduled_policy":false, + "scriptId":"307", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"308", + "orgId":"1", + "policyName":"Snyk Code Scan - Medium Severity Findings Policy", + "category":"SAST", + "stage":"source", + "description":"This policy is designed to ensure timely identification, assessment, and resolution of medium-severity findings in Snyk Code Scan analysis. It outlines the procedures and responsibilities for addressing issues that could pose significant risks to code quality and security.", + "scheduled_policy":false, + "scriptId":"308", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"309", + "orgId":"1", + "policyName":"Snyk Code Scan - Low Severity Findings Policy", + "category":"SAST", + "stage":"source", + "description":"This policy is designed to ensure timely identification, assessment, and resolution of low-severity findings in Snyk Code Scan analysis. 
It outlines the procedures and responsibilities for addressing issues that could pose significant risks to code quality and security.", + "scheduled_policy":false, + "scriptId":"309", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"310", + "orgId":"1", + "policyName":"Code License Scan - License Association Policy", + "category":"License Scan", + "stage":"source", + "description":"This policy is designed to ensure association of appropriate licenses with source code repository.", + "scheduled_policy":false, + "scriptId":"310", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"311", + "orgId":"1", + "policyName":"Code License Scan - Low Severity License Association Policy", + "category":"License Scan", + "stage":"source", + "description":"This policy is designed to restrict association of low severity licenses with source code repository.", + "scheduled_policy":false, + "scriptId":"311", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"312", + "orgId":"1", + "policyName":"Code License Scan - Medium Severity License Association Policy", + "category":"License Scan", + "stage":"source", + "description":"This policy is designed to restrict association of medium or unknown severity licenses with source code repository.", + "scheduled_policy":false, + "scriptId":"312", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"313", + "orgId":"1", + "policyName":"Code License Scan - High Severity License Association Policy", + "category":"License Scan", + "stage":"source", + "description":"This policy is designed to restrict association of high severity licenses with source code repository.", + "scheduled_policy":false, + "scriptId":"313", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"314", + "orgId":"1", + "policyName":"Code License Scan - Critical Severity License Association 
Policy", + "category":"License Scan", + "stage":"source", + "description":"This policy is designed to restrict association of critical severity licenses with source code repository.", + "scheduled_policy":false, + "scriptId":"314", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"315", + "orgId":"1", + "policyName":"Artifact License Scan - License Association Policy", + "category":"License Scan", + "stage":"artifact", + "description":"This policy is designed to ensure association of appropriate licenses with artifact and its components.", + "scheduled_policy":false, + "scriptId":"315", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"316", + "orgId":"1", + "policyName":"Artifact License Scan - Low Severity License Association Policy", + "category":"License Scan", + "stage":"artifact", + "description":"This policy is designed to restrict association of low severity licenses with artifact and its components.", + "scheduled_policy":false, + "scriptId":"316", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"317", + "orgId":"1", + "policyName":"Artifact License Scan - Medium Severity License Association Policy", + "category":"License Scan", + "stage":"artifact", + "description":"This policy is designed to restrict association of medium or unknown severity licenses with artifact and its components.", + "scheduled_policy":false, + "scriptId":"317", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"318", + "orgId":"1", + "policyName":"Artifact License Scan - High Severity License Association Policy", + "category":"License Scan", + "stage":"artifact", + "description":"This policy is designed to restrict association of high severity licenses with artifact and its components.", + "scheduled_policy":false, + "scriptId":"318", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"319", + 
"orgId":"1", + "policyName":"Artifact License Scan - Critical Severity License Association Policy", + "category":"License Scan", + "stage":"artifact", + "description":"This policy is designed to restrict association of critical severity licenses with artifact and its components.", + "scheduled_policy":false, + "scriptId":"319", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"320", + "orgId":"1", + "policyName":"Virus Total Scan - Malicious URL in Code or Configuration Policy", + "category":"Code Security", + "stage":"source", + "description":"This policy is designed to restrict usage of any malicious URL in source code repository or configuration files.", + "scheduled_policy":false, + "scriptId":"320", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"321", + "orgId":"1", + "policyName":"Virus Total Scan - Suspicious URL in Code or Configuration Policy", + "category":"Code Security", + "stage":"source", + "description":"This policy is designed to restrict usage of any suspicious URL in source code repository or configuration files.", + "scheduled_policy":false, + "scriptId":"321", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"322", + "orgId":"1", + "policyName":"Github Actions Secret Management Policy", + "category":"Build Security Posture", + "stage":"build", + "description":"Ensure all sensitive data is stored as secrets and not hardcoded in workflows.", + "scheduled_policy":false, + "scriptId":"322", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"323", + "orgId":"1", + "policyName":"Github Actions Approved Actions Policy", + "category":"Build Security Posture", + "stage":"build", + "description":"Only use approved GitHub Actions from a whitelist of trusted sources.", + "scheduled_policy":false, + "scriptId":"323", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + 
"policyId":"324", + "orgId":"1", + "policyName":"Github Actions Dependency Management Policy", + "category":"Build Security Posture", + "stage":"build", + "description":"Ensure dependencies are checked and managed securely. This policy verifies that dependencies are fetched from trusted sources and validate checksums where applicable.", + "scheduled_policy":false, + "scriptId":"324", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"325", + "orgId":"1", + "policyName":"Github Actions Workflow Trigger Policy", + "category":"Build Security Posture", + "stage":"build", + "description":"Ensure workflows are triggered securely to prevent abuse. This policy verifies that workflows are triggered on specific branches and events, and not on arbitrary pushes or pull requests.", + "scheduled_policy":false, + "scriptId":"325", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"326", + "orgId":"1", + "policyName":"Github Actions Secure Communication Policy", + "category":"Build Security Posture", + "stage":"build", + "description":"Ensure secure communication channels are used within workflows. 
This policy verifies that all network communications within workflows use secure protocols.", + "scheduled_policy":false, + "scriptId":"326", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"327", + "orgId":"1", + "policyName":"Github Actions Timeout Policy", + "category":"Build Security Posture", + "stage":"build", + "description":"Ensure workflows have appropriate timeout settings to prevent runaway processes.", + "scheduled_policy":false, + "scriptId":"327", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, + ` + { + "policyId":"328", + "orgId":"1", + "policyName":"Github Actions Workflow Permissions Policy", + "category":"Build Security Posture", + "stage":"build", + "description":"Ensure workflows has limited permissions over repository.", + "scheduled_policy":false, + "scriptId":"328", + "variables":"", + "conditionName":"", + "suggestion":"" + } + `, +} + +var policyEnforcement = []string{ + `{ + "policyId": "1", + "severity": "Medium", + "action": "Alert", + "conditionValue": "true", + "status": true, + "datasourceTool": "github", + "tags": [ + "1" + ] + }`, + `{ + "policyId": "2", + "severity": "Critical", + "action": "Alert", + "conditionValue": "2", + "status": true, + "datasourceTool": "github", + "tags": [ + "1" + ] + }`, + `{ + "policyId": "3", + "severity": "Critical", + "action": "Alert", + "conditionValue": "true", + "status": true, + "datasourceTool": "github", + "tags": [ + "1", + "11", + "13", + "17" + ] + }`, + `{ + "policyId": "4", + "severity": "Low", + "action": "Alert", + "conditionValue": "true", + "status": true, + "datasourceTool": "github", + "tags": [ + "1", + "11", + "13", + "17" + ] + }`, + `{ + "policyId": "5", + "severity": "Low", + "action": "Alert", + "conditionValue": "true", + "status": true, + "datasourceTool": "github", + "tags": [ + "1" + ] + }`, + `{ + "policyId": "6", + "severity": "Medium", + "action": "Alert", + "conditionValue": "true", + "status": true, + 
"datasourceTool": "github", + "tags": [ + "1" + ] + }`, + `{ + "policyId": "7", + "severity": "Low", + "action": "Alert", + "conditionValue": "LOW", + "status": true, + "datasourceTool": "graphql", + "tags": [ + "17", + "22" + ] + }`, + `{ + "policyId": "8", + "severity": "Critical", + "action": "Alert", + "conditionValue": "CRITICAL", + "status": true, + "datasourceTool": "graphql", + "tags": [ + "17", + "22" + ] + }`, + `{ + "policyId": "9", + "severity": "Medium", + "action": "Alert", + "conditionValue": "MEDIUM", + "status": true, + "datasourceTool": "graphql", + "tags": [ + "17", + "22" + ] + }`, + `{ + "policyId": "10", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "github", + "tags": [ + "3" + ] + }`, + `{ + "policyId": "11", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "github", + "tags": [ + "3" + ] + }`, + `{ + "policyId": "12", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "jenkins", + "tags": [ + "2" + ] + }`, + `{ + "policyId": "13", + "severity": "Critical", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4", + "11", + "13", + "17" + ] + }`, + `{ + "policyId": "14", + "severity": "Low", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4" + ] + }`, + `{ + "policyId": "15", + "severity": "Low", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4" + ] + }`, + `{ + "policyId": "16", + "severity": "Critical", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4" + ] + }`, + `{ + "policyId": "17", + "severity": "Low", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4" + ] + }`, + `{ + "policyId": "18", + "severity": "Critical", + "action": 
"Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4" + ] + }`, + `{ + "policyId": "19", + "severity": "Critical", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4" + ] + }`, + `{ + "policyId": "20", + "severity": "Medium", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4" + ] + }`, + `{ + "policyId": "21", + "severity": "Low", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4" + ] + }`, + `{ + "policyId": "22", + "severity": "Critical", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4" + ] + }`, + `{ + "policyId": "23", + "severity": "Medium", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4" + ] + }`, + `{ + "policyId": "24", + "severity": "Medium", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4" + ] + }`, + `{ + "policyId": "25", + "severity": "Medium", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4" + ] + }`, + `{ + "policyId": "26", + "severity": "Critical", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4" + ] + }`, + `{ + "policyId": "27", + "severity": "Critical", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4" + ] + }`, + `{ + "policyId": "28", + "severity": "Critical", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4" + ] + }`, + `{ + "policyId": "29", + "severity": "Critical", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + 
"4", + "17" + ] + }`, + `{ + "policyId": "30", + "severity": "Critical", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4" + ] + }`, + `{ + "policyId": "31", + "severity": "Critical", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "4" + ] + }`, + `{ + "policyId": "32", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "github", + "tags": [ + "1" + ] + }`, + `{ + "policyId": "33", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "github", + "tags": [ + "1", + "11", + "13", + "17" + ] + }`, + `{ + "policyId": "34", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "github", + "tags": [ + "1", + "11", + "13", + "17" + ] + }`, + `{ + "policyId": "35", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "github", + "tags": [ + "3" + ] + }`, + `{ + "policyId": "36", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "github", + "tags": [ + "3" + ] + }`, + `{ + "policyId": "37", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "jenkins", + "tags": [ + "3" + ] + }`, + `{ + "policyId": "38", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "docker", + "tags": [ + "2" + ] + }`, + `{ + "policyId": "38", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "quay", + "tags": [ + "2" + ] +}`, + `{ + "policyId": "38", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "jfrog", + "tags": [ + "2" + ] +}`, + `{ + "policyId": "38", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "ecr", + "tags": [ + "2" + ] +}`, + `{ + "policyId": "39", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": 
"docker", + "tags": [ + "2" + ] + }`, + `{ + "policyId": "39", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "quay", + "tags": [ + "2" + ] +}`, + `{ + "policyId": "39", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "jfrog", + "tags": [ + "2" + ] +}`, + `{ + "policyId": "39", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "ecr", + "tags": [ + "2" + ] +}`, + `{ + "policyId": "40", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "github", + "tags": [ + "1" + ] + }`, + `{ + "policyId": "41", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "docker", + "tags": [ + "2" + ] + }`, + `{ + "policyId": "42", + "severity": "Critical", + "action": "Alert", + "conditionValue": "5", + "status": true, + "datasourceTool": "openssf", + "tags": [ + "17", + "4" + ] + }`, + `{ + "policyId": "43", + "severity": "Critical", + "action": "Alert", + "conditionValue": "2.0", + "status": true, + "datasourceTool": "sonarqube", + "tags": [ + "12", + "11", + "10" + ] + }`, + `{ + "policyId": "44", + "severity": "Medium", + "action": "Alert", + "conditionValue": "3.0", + "status": true, + "datasourceTool": "sonarqube", + "tags": [ + "12", + "11", + "10" + ] + }`, + `{ + "policyId": "45", + "severity": "Low", + "action": "Alert", + "conditionValue": "4.0", + "status": true, + "datasourceTool": "sonarqube", + "tags": [ + "12", + "11", + "10" + ] + }`, + `{ + "policyId": "46", + "severity": "Medium", + "action": "Prevent", + "status": true, + "datasourceTool": "kubernetes", + "tags": [ + "5", + "7", + "8", + "17" + ] + }`, + `{ + "policyId": "47", + "severity": "Medium", + "action": "Prevent", + "status": true, + "datasourceTool": "kubernetes", + "tags": [ + "5", + "7", + "8", + "17" + ] + }`, + `{ + "policyId": "48", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "semgrep", + 
"tags": [ + "12", + "11", + "10" + ] + }`, + `{ + "policyId": "49", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "semgrep", + "tags": [ + "12", + "11", + "10" + ] + }`, + `{ + "policyId": "50", + "severity": "Low", + "action": "Prevent", + "status": true, + "datasourceTool": "kubernetes", + "tags": [ + "5", + "7", + "8", + "17" + ] + }`, + `{ + "policyId": "51", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "sonarqube", + "tags": [ + "10", + "7", + "8", + "17" + ] + }`, + `{ + "policyId": "51", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "semgrep", + "tags": [ + "5", + "7", + "8", + "17" + ] +}`, + `{ + "policyId": "52", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "semgrep", + "tags": [ + "12", + "11", + "10" + ] + }`, + `{ + "policyId": "53", + "severity": "Critical", + "action": "Prevent", + "status": true, + "datasourceTool": "kubernetes", + "tags": [ + "6" + ] + }`, + `{ + "policyId": "54", + "severity": "Critical", + "action": "Prevent", + "status": true, + "datasourceTool": "kubernetes", + "tags": [ + "6" + ] + }`, + `{ + "policyId": "55", + "severity": "Critical", + "action": "Prevent", + "status": true, + "datasourceTool": "kubernetes", + "tags": [ + "6" + ] + }`, + `{ + "policyId": "56", + "severity": "Critical", + "action": "Prevent", + "status": true, + "datasourceTool": "kubernetes", + "tags": [ + "6" + ] + }`, + `{ + "policyId": "57", + "severity": "Critical", + "action": "Prevent", + "status": true, + "datasourceTool": "kubernetes", + "tags": [ + "6" + ] + }`, + `{ + "policyId": "58", + "severity": "Critical", + "action": "Prevent", + "status": true, + "datasourceTool": "kubernetes", + "tags": [ + "6" + ] + }`, + `{ + "policyId": "59", + "severity": "Critical", + "action": "Prevent", + "status": true, + "datasourceTool": "kubernetes", + "tags": [ + "6" + ] + }`, + `{ + "policyId": "60", + "severity": 
"Critical", + "action": "Prevent", + "status": true, + "datasourceTool": "kubernetes", + "tags": [ + "6" + ] + }`, + `{ + "policyId": "61", + "severity": "Critical", + "action": "Prevent", + "status": true, + "datasourceTool": "kubernetes", + "tags": [ + "6" + ] + }`, + `{ + "policyId": "62", + "severity": "Critical", + "action": "Prevent", + "status": true, + "datasourceTool": "kubernetes", + "tags": [ + "6" + ] + }`, + `{ + "policyId": "63", + "severity": "Critical", + "action": "Prevent", + "status": true, + "datasourceTool": "kubernetes", + "tags": [ + "6" + ] + }`, + `{ + "policyId": "64", + "severity": "Critical", + "action": "Prevent", + "status": true, + "datasourceTool": "kubernetes", + "tags": [ + "6" + ] + }`, + `{ + "policyId": "65", + "severity": "Critical", + "action": "Prevent", + "status": true, + "datasourceTool": "kubernetes", + "tags": [ + "6" + ] + }`, + `{ + "policyId": "66", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "sonarqube", + "tags": [ + "12", + "11", + "10" + ] + }`, + `{ + "policyId": "67", + "severity": "Critical", + "action": "Alert", + "conditionValue": "5.0", + "status": true, + "datasourceTool": "sonarqube", + "tags": [ + "12", + "11", + "10" + ] + }`, + `{ + "policyId": "68", + "severity": "Critical", + "action": "Alert", + "conditionValue": "2.0", + "status": true, + "datasourceTool": "sonarqube", + "tags": [ + "12", + "11", + "10" + ] + }`, + `{ + "policyId": "69", + "severity": "Medium", + "action": "Alert", + "conditionValue": "3.0", + "status": true, + "datasourceTool": "sonarqube", + "tags": [ + "12", + "11", + "10" + ] + }`, + `{ + "policyId": "70", + "severity": "Low", + "action": "Alert", + "conditionValue": "4.0", + "status": true, + "datasourceTool": "sonarqube", + "tags": [ + "12", + "11", + "10" + ] + }`, + `{ + "policyId": "71", + "severity": "Critical", + "action": "Alert", + "conditionValue": "1.0", + "status": true, + "datasourceTool": "sonarqube", + "tags": [ + "12", + 
"11", + "10" + ] + }`, + `{ + "policyId": "72", + "severity": "Critical", + "action": "Alert", + "conditionValue": "2.0", + "status": true, + "datasourceTool": "sonarqube", + "tags": [ + "12", + "11", + "10" + ] + }`, + `{ + "policyId": "73", + "severity": "Medium", + "action": "Alert", + "conditionValue": "3.0", + "status": true, + "datasourceTool": "sonarqube", + "tags": [ + "12", + "11", + "10" + ] + }`, + `{ + "policyId": "74", + "severity": "Low", + "action": "Alert", + "conditionValue": "4.0", + "status": true, + "datasourceTool": "sonarqube", + "tags": [ + "12", + "11", + "10" + ] + }`, + `{ + "policyId": "75", + "severity": "Critical", + "action": "Alert", + "conditionValue": "1.0", + "status": true, + "datasourceTool": "sonarqube", + "tags": [ + "12", + "11", + "10" + ] + }`, + `{ + "policyId": "76", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "graphql", + "tags": [ + "17", + "22" + ] + }`, + `{ + "policyId": "77", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "78", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "79", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "80", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "81", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "82", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "83", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", 
+ "tags": [ + "14" + ] + }`, + `{ + "policyId": "84", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "85", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "86", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "87", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "88", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "89", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "90", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "91", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "92", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "93", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "94", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "95", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "96", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": 
[ + "14" + ] + }`, + `{ + "policyId": "97", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "98", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "99", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "100", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "101", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "102", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "103", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "104", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "105", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "106", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "107", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "108", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "109", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + 
"14" + ] + }`, + `{ + "policyId": "110", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "111", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "112", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "113", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "114", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "115", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "116", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "117", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "118", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "119", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "120", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "121", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "122", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + 
"14" + ] + }`, + `{ + "policyId": "123", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "124", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "125", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "126", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "127", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "128", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "129", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "130", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "131", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "132", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "133", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "134", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "135", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + 
"14" + ] + }`, + `{ + "policyId": "136", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "137", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "138", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "139", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "140", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "141", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "142", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "143", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "144", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "145", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "146", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "147", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "148", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ 
+ "14" + ] + }`, + `{ + "policyId": "149", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "150", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "151", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "152", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "153", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "154", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "155", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "156", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "157", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "158", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "159", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "160", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "161", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": 
[ + "14" + ] + }`, + `{ + "policyId": "162", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "163", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "164", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "165", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "166", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "167", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "168", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "169", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "170", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "171", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "172", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "173", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "174", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ 
+ "14" + ] + }`, + `{ + "policyId": "175", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "176", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "177", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "178", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "179", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "180", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "181", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "182", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "183", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "184", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "185", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "186", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "187", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", 
+ "tags": [ + "14" + ] + }`, + `{ + "policyId": "188", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "189", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "190", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "191", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "192", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "193", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "194", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "195", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "196", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "197", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "198", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "199", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "200", + "severity": "High", + "action": "Alert", + 
"status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "201", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "202", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "203", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "204", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "205", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "206", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "207", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "208", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "209", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "210", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "211", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + 
"policyId": "212", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "213", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "214", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "215", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "216", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "217", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "218", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "219", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "220", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "221", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "222", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "223", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": 
"mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "224", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "27" + ] + }`, + `{ + "policyId": "225", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "226", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "227", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "228", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "229", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "230", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "231", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "232", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "233", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "234", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "235", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ 
+ "16", + "27" + ] + }`, + `{ + "policyId": "236", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "237", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "238", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "239", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "240", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "241", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "242", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "243", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "244", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "245", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "246", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "247", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "248", + 
"severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "27" + ] + }`, + `{ + "policyId": "249", + "severity": "Low", + "action": "Alert", + "conditionValue": "70-85", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "250", + "severity": "Medium", + "action": "Alert", + "conditionValue": "50-70", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "251", + "severity": "High", + "action": "Alert", + "conditionValue": "30-50", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "252", + "severity": "Critical", + "action": "Alert", + "conditionValue": "0-30", + "status": true, + "datasourceTool": "cis-kubescape", + "tags": [ + "14" + ] + }`, + `{ + "policyId": "253", + "severity": "Low", + "action": "Alert", + "conditionValue": "70-85", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "28" + ] + }`, + `{ + "policyId": "254", + "severity": "Medium", + "action": "Alert", + "conditionValue": "50-70", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "28" + ] + }`, + `{ + "policyId": "255", + "severity": "High", + "action": "Alert", + "conditionValue": "30-50", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "28" + ] + }`, + `{ + "policyId": "256", + "severity": "Critical", + "action": "Alert", + "conditionValue": "0-30", + "status": true, + "datasourceTool": "mitre-kubescape", + "tags": [ + "15", + "17", + "28" + ] + }`, + `{ + "policyId": "257", + "severity": "Low", + "action": "Alert", + "conditionValue": "70-85", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "28" + ] + }`, + `{ + "policyId": "258", + "severity": "Medium", + "action": "Alert", + "conditionValue": "50-70", + "status": true, + "datasourceTool": 
"nsa-kubescape", + "tags": [ + "16", + "28" + ] + }`, + `{ + "policyId": "259", + "severity": "High", + "action": "Alert", + "conditionValue": "30-50", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "28" + ] + }`, + `{ + "policyId": "260", + "severity": "Critical", + "action": "Alert", + "conditionValue": "0-30", + "status": true, + "datasourceTool": "nsa-kubescape", + "tags": [ + "16", + "28" + ] + }`, + `{ + "policyId": "261", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "github", + "tags": [ + "18", + "1" + ] + }`, + `{ + "policyId": "262", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "spinnaker", + "tags": [ + "18", + "5" + ] + }`, + `{ + "policyId": "263", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "github", + "tags": [ + "18", + "1" + ] + }`, + `{ + "policyId": "264", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "jenkins", + "tags": [ + "18", + "3" + ] + }`, + `{ + "policyId": "265", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "github", + "tags": [ + "18", + "1" + ] + }`, + `{ + "policyId": "266", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "github", + "tags": [ + "18", + "1" + ] + }`, + `{ + "policyId": "267", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "github", + "tags": [ + "18", + "1" + ] + }`, + `{ + "policyId": "268", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "github", + "tags": [ + "18", + "1" + ] + }`, + `{ + "policyId": "269", + "status": true, + "action": "Alert", + "severity": "Medium", + "datasourceTool": "github", + "tags": [ + "18", + "1" + ] + }`, + `{ + "policyId": "270", + "status": true, + "action": "Alert", + "severity": "Medium", + "datasourceTool": "github", + "tags": [ + "18", + "1" + ] + }`, + `{ + 
"policyId": "271", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "docker", + "tags": [ + "18", + "2" + ] + }`, + `{ + "policyId": "271", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "jfrog", + "tags": [ + "18", + "2" + ] +}`, + `{ + "policyId": "271", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "quay", + "tags": [ + "18", + "2" + ] +}`, + `{ + "policyId": "271", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "ecr", + "tags": [ + "18", + "2" + ] +}`, + `{ + "policyId": "272", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "kubernetes", + "tags": [ + "18", + "5" + ] + }`, + `{ + "policyId": "273", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "docker", + "tags": [ + "18", + "22" + ] + }`, + `{ + "policyId": "273", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "quay", + "tags": [ + "18", + "22" + ] +}`, + `{ + "policyId": "273", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "jfrog", + "tags": [ + "18", + "22" + ] +}`, + `{ + "policyId": "273", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "ecr", + "tags": [ + "18", + "22" + ] +}`, + `{ + "policyId": "274", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "trivy", + "tags": [ + "19" + ] + }`, + `{ + "policyId": "275", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "trivy", + "tags": [ + "19" + ] + }`, + `{ + "policyId": "276", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "trivy", + "tags": [ + "19" + ] + }`, + `{ + "policyId": "277", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "trivy", + "tags": [ + "19" + ] + }`, + `{ + "policyId": "278", + 
"severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "trivy", + "tags": [ + "20", + "19" + ] + }`, + `{ + "policyId": "279", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "trivy", + "tags": [ + "20", + "19" + ] + }`, + `{ + "policyId": "280", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "trivy", + "tags": [ + "20", + "19" + ] + }`, + `{ + "policyId": "281", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "trivy", + "tags": [ + "20", + "19" + ] + }`, + `{ + "policyId": "282", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "helm", + "tags": [ + "21", + "19" + ] + }`, + `{ + "policyId": "283", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "helm", + "tags": [ + "21", + "19" + ] + }`, + `{ + "policyId": "284", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "helm", + "tags": [ + "21", + "19" + ] + }`, + `{ + "policyId": "285", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "helm", + "tags": [ + "21", + "19" + ] + }`, + `{ + "policyId": "286", + "severity": "Medium", + "action": "Alert", + "conditionValue": "true", + "status": true, + "datasourceTool": "gitlab", + "tags": [ + "23", + "1" + ] + }`, + `{ + "policyId": "287", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "gitlab", + "tags": [ + "23", + "1" + ] + }`, + `{ + "policyId": "288", + "severity": "Critical", + "action": "Alert", + "conditionValue": "true", + "status": true, + "datasourceTool": "gitlab", + "tags": [ + "23", + "1" + ] + }`, + `{ + "policyId": "289", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "gitlab", + "tags": [ + "23", + "1" + ] + }`, + `{ + "policyId": "290", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": 
"gitlab", + "tags": [ + "23", + "1" + ] + }`, + `{ + "policyId": "291", + "severity": "Medium", + "action": "Alert", + "conditionValue": "true", + "status": true, + "datasourceTool": "gitlab", + "tags": [ + "23", + "1" + ] + }`, + `{ + "policyId": "292", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "gitlab", + "tags": [ + "23", + "1" + ] + }`, + `{ + "policyId": "293", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "argo", + "tags": [ + "17", + "5" + ] + }`, + `{ + "policyId": "294", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "jenkins", + "tags": [ + "17", + "5" + ] + }`, + `{ + "policyId": "295", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "bitbucket", + "tags": [ + "24", + "1" + ] + }`, + `{ + "policyId": "296", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "bitbucket", + "tags": [ + "24", + "1" + ] + }`, + `{ + "policyId": "297", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "bitbucket", + "tags": [ + "24", + "1" + ] + }`, + `{ + "policyId": "298", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "bitbucket", + "tags": [ + "24", + "1" + ] + }`, + `{ + "policyId": "299", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "bitbucket", + "tags": [ + "24", + "1" + ] + }`, + `{ + "policyId": "300", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "bitbucket", + "tags": [ + "24", + "1" + ] + }`, + `{ + "policyId": "301", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "bitbucket", + "tags": [ + "24", + "1" + ] + }`, + `{ + "policyId": "302", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "bitbucket", + "tags": [ + "24", + "1" + ] + }`, + `{ + "policyId": "303", + "severity": "Low", + 
"action": "Alert", + "status": true, + "datasourceTool": "bitbucket", + "tags": [ + "24", + "1" + ] + }`, + `{ + "policyId": "304", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "bitbucket", + "tags": [ + "24", + "1" + ] + }`, + `{ + "policyId": "305", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "bitbucket", + "tags": [ + "24", + "1" + ] + }`, + `{ + "policyId": "306", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "bitbucket", + "tags": [ + "24", + "1" + ] + }`, + `{ + "policyId": "307", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "snyk", + "tags": [ + "10" + ] + }`, + `{ + "policyId": "308", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "snyk", + "tags": [ + "10" + ] + }`, + `{ + "policyId": "309", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "snyk", + "tags": [ + "10" + ] + }`, + `{ + "policyId": "310", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "trivy", + "tags": [ + "25" + ] + }`, + `{ + "policyId": "311", + "severity": "Low", + "action": "Alert", + "status": false, + "datasourceTool": "trivy", + "tags": [ + "25" + ] + }`, + `{ + "policyId": "312", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "trivy", + "tags": [ + "25" + ] + }`, + `{ + "policyId": "313", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "trivy", + "tags": [ + "25" + ] + }`, + `{ + "policyId": "314", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "trivy", + "tags": [ + "25" + ] + }`, + `{ + "policyId": "315", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "trivy", + "tags": [ + "25" + ] + }`, + `{ + "policyId": "316", + "severity": "Low", + "action": "Alert", + "status": false, + "datasourceTool": "trivy", + 
"tags": [ + "25" + ] + }`, + `{ + "policyId": "317", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "trivy", + "tags": [ + "25" + ] + }`, + `{ + "policyId": "318", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "trivy", + "tags": [ + "25" + ] + }`, + `{ + "policyId": "319", + "severity": "Critical", + "action": "Alert", + "status": true, + "datasourceTool": "trivy", + "tags": [ + "25" + ] + }`, + `{ + "policyId": "320", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "virustotal", + "tags": [ + "12", + "26" + ] + }`, + `{ + "policyId": "321", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "virustotal", + "tags": [ + "12", + "26" + ] + }`, + `{ + "policyId": "322", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "githubactions", + "tags": [ + "3", + "29" + ] + }`, + `{ + "policyId": "323", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "githubactions", + "tags": [ + "3", + "29" + ] + }`, + `{ + "policyId": "324", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "githubactions", + "tags": [ + "3", + "29" + ] + }`, + `{ + "policyId": "325", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "githubactions", + "tags": [ + "3", + "29" + ] + }`, + `{ + "policyId": "326", + "severity": "High", + "action": "Alert", + "status": true, + "datasourceTool": "githubactions", + "tags": [ + "3", + "29" + ] + }`, + `{ + "policyId": "327", + "severity": "Low", + "action": "Alert", + "status": true, + "datasourceTool": "githubactions", + "tags": [ + "3", + "29" + ] + }`, + `{ + "policyId": "328", + "severity": "Medium", + "action": "Alert", + "status": true, + "datasourceTool": "githubactions", + "tags": [ + "3", + "29" + ] + }`, +} + +var tagPolicy = []string{ + `{ + "id": "00", + "tagName": "userdefined", + 
"tagValue": "User Defined Policies", + "tagDescription": "", + "createdBy": "system" + }`, + `{ + "id": "1", + "tagName": "policyCategory", + "tagValue": "Git Security Posture", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "2", + "tagName": "policyCategory", + "tagValue": "Artifact Integrity", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "3", + "tagName": "policyCategory", + "tagValue": "Build Security Posture", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "4", + "tagName": "policyCategory", + "tagValue": "OpenSSF Scorecard", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "5", + "tagName": "policyCategory", + "tagValue": "Deployment Config", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "6", + "tagName": "policyCategory", + "tagValue": "Pod Security", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "7", + "tagName": "policyCategory", + "tagValue": "NIST-800-53-CM7", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "8", + "tagName": "policyCategory", + "tagValue": "FedRAMP-CM7", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "9", + "tagName": "policyCategory", + "tagValue": "FedRAMP-RA5", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "10", + "tagName": "policyCategory", + "tagValue": "SAST", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "11", + "tagName": "policyCategory", + "tagValue": "NIST-800-53-AC6", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "12", + "tagName": "policyCategory", + "tagValue": "Code Security", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "13", + "tagName": "policyCategory", + "tagValue": "FedRAMP-AC6", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "14", + "tagName": "policyCategory", + "tagValue": 
"CIS-Benchmark", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "15", + "tagName": "policyCategory", + "tagValue": "MITRE-ATT&CK", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "16", + "tagName": "policyCategory", + "tagValue": "NSA-CISA", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "17", + "tagName": "policyCategory", + "tagValue": "NIST-800-53-R5", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "18", + "tagName": "policyCategory", + "tagValue": "OWASP-CICD-Top10", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "19", + "tagName": "policyCategory", + "tagValue": "Secret Scan", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "20", + "tagName": "policyCategory", + "tagValue": "Artifact Scan", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "21", + "tagName": "policyCategory", + "tagValue": "Helm Scan", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "22", + "tagName": "policyCategory", + "tagValue": "Vulnerability Analysis", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "23", + "tagName": "policyCategory", + "tagValue": "Gitlab", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "24", + "tagName": "policyCategory", + "tagValue": "Bitbucket", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "25", + "tagName": "policyCategory", + "tagValue": "License Scan", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "26", + "tagName": "policyCategory", + "tagValue": "Virus Total Scan", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "27", + "tagName": "policyCategory", + "tagValue": "Cloud Security", + "tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "28", + "tagName": "policyCategory", + "tagValue": "Compliance", + 
"tagDescription": "", + "createdBy": "system" + } + `, + ` + { + "id": "29", + "tagName": "policyCategory", + "tagValue": "Github Actions", + "tagDescription": "", + "createdBy": "system" + } + `, +} diff --git a/schemas/april2024.go b/schemas/april2024.go new file mode 100644 index 0000000..f266cab --- /dev/null +++ b/schemas/april2024.go @@ -0,0 +1,611 @@ +package schemas + +const April2024Schema = `type SchemaVersion { + version: String! +} + +interface RBAC { + roles: [Role!] +} + +enum RolePermission { + admin + write + read +} + +type Role { + "id is randomly assigned" + id: String! @id + "group should be a URI format that includes a scope or realm" + group: String! @search(by: [hash]) + permission: RolePermission! @search(by: [hash]) +} + +""" +KeyValue is a generic key/value pair, used as an attribute list or similar. +""" +type KeyValue { + id: String! @id + name: String! + value: String! +} + + +type Organization implements RBAC + @withSubscription + @auth( + query: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryOrganization @cascade { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}"}, +{ rule: "query($groups: [String!]) { queryOrganization @cascade { teams { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}"}, +{ rule: "query($groups: [String!]) { queryOrganization @cascade { teams { applications { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}}"}, + ]}, + add: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryOrganization @cascade { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}"}, + ]}, + update: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryOrganization @cascade { roles(filter: {group: {in: $groups}, permission: {in: 
[admin]}}) { __typename }}}"}, + ]}, + delete: + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, + ) +{ + "id is randomly assigned" + id: String! @id + name: String! @search(by: [exact]) + roles: [Role!] + teams: [Team!] @hasInverse(field: organization) + environments: [DeploymentTarget!] @hasInverse(field: organization) + policies: [PolicyDefinition!] @hasInverse(field: ownerOrg) + policyEnforcements: [PolicyEnforcement!] + integrators: [Integrator!] @hasInverse(field: organization) + featureModes: [FeatureMode!] @hasInverse(field: organization) +} + +""" +Environment can be things like dev, prod, staging etc. +""" +type Environment { + id: String! @id + organization: Organization! + purpose: String! @search(by: [exact]) +} + +""" +DeploymentTarget describes a single place that things can be deployed into, +such as an AWS account or a Kubernetes cluster. +""" +type DeploymentTarget + @auth( + query: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryDeploymentTarget @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}"}, + ]}, + add: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryDeploymentTarget @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, + ]}, + update: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryDeploymentTarget @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, + ]}, + delete: + { rule: "{$type: {eq: \"internal-account/v1\"}}" } + ) +{ + "id is randomly assigned" + id: String! @id + name: String! @search(by: [exact]) + ip: String! @search(by: [exact]) + isFirewall: Boolean + organization: Organization! @hasInverse(field: environments) + defaultEnvironment: Environment! 
+} + + +type Team implements RBAC + @withSubscription + @auth( + query: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryTeam @cascade { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}"}, +{ rule: "query($groups: [String!]) { queryTeam @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}"}, +{ rule: "query($groups: [String!]) { queryTeam @cascade { applications { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}"}, + ]}, + add: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryTeam @cascade { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}"}, +{ rule: "query($groups: [String!]) { queryTeam @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, + ]}, + update: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryTeam @cascade { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}"}, +{ rule: "query($groups: [String!]) { queryTeam @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, + ]}, + delete: + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, + ) +{ + "id is randomly assigned" + id: String! @id + name: String! @search(by: [exact]) + roles: [Role!] + organization: Organization! @hasInverse(field: teams) + applications: [Application!] + labels: [KeyValue!] + policies: [PolicyDefinition!] @hasInverse(field: ownerTeam) + policyEnforcements: [PolicyEnforcement!] 
+} + +type Application implements RBAC + @withSubscription + @auth( + query: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryApplication @cascade { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}"}, +{ rule: "query($groups: [String!]) { queryApplication @cascade { team { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}"}, +{ rule: "query($groups: [String!]) { queryApplication @cascade { team { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}}"}, + ]}, + add: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryApplication @cascade { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}"}, +{ rule: "query($groups: [String!]) { queryApplication @cascade { team { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, +{ rule: "query($groups: [String!]) { queryApplication @cascade { team { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}}"}, + ]}, + update: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryApplication @cascade { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}"}, +{ rule: "query($groups: [String!]) { queryApplication @cascade { team { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, +{ rule: "query($groups: [String!]) { queryApplication @cascade { team { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}}"}, + ]}, + delete: + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, + ) +{ + "id is randomly assigned" + id: String! @id + name: String! @search(by: [exact]) + roles: [Role!] + environments: [ApplicationEnvironment!] 
@hasInverse(field: application) + team: Team! @hasInverse(field: applications) + policies: [PolicyDefinition!] @hasInverse(field: ownerApplication) + policyEnforcements: [PolicyEnforcement!] @hasInverse(field: enforcedApplication) + metadata: [KeyValue!] +} + + +""" +ApplicationEnvironment is a running instance of an application down to the level of a namespace or its non k8s equivalent. +""" +type ApplicationEnvironment @withSubscription { + "id is randomly assigned" + id: String! @id + "environment denotes whether it is dev, prod, staging, non-prod etc" + environment: Environment + application: Application! + deploymentTarget: DeploymentTarget! + namespace: String! @search(by:[exact]) + "toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env" + toolsUsed: String + deployments: [ApplicationDeployment!] @hasInverse(field: applicationEnvironment) + riskStatus: ApplicationRiskStatus @hasInverse(field: applicationEnvironment) + metadata: [KeyValue!] +} + +""" +RiskStatus tells us what risk a current application instance or a deployment is at. +""" +enum RiskStatus { + lowrisk + mediumrisk + highrisk + apocalypserisk + inprogress +} + +""" +ApplicationRiskStatus tells us about the risk status and alerts for different stages for an application environment. +""" +type ApplicationRiskStatus { + id: ID! + riskStatus: RiskStatus @search + sourceCodeAlerts: Int + buildAlerts: Int + artifactAlerts: Int + deploymentAlerts: Int + createdAt: DateTime! + updatedAt: DateTime! + applicationEnvironment: ApplicationEnvironment! +} + + +""" +DeploymentStage is an enum denoting the stage of the deployment. . 
+""" +enum DeploymentStage { + "deployment is discovered from the events" + discovered + "deployment is known to have passed the deployment firewall and the deployment(ie the artifact) is live" + current + "deployment becomes a past deployment because another fresh deployment has happened" + previous + "deployment is blocked by the firewall" + blocked +} + +""" +ApplicationDeployment tells us about the the artifact deployed along with its associated details. +""" +type ApplicationDeployment { + "id is randomly assigned" + id: String! @id + "artifact that is deployed" + artifact: [Artifact!] + applicationEnvironment: ApplicationEnvironment! + deployedAt: DateTime @search + "deploymentStage is an enum and can be discovered, current, previous or blocked" + deploymentStage: DeploymentStage! @search(by: [exact]) + "source is argo, spinnaker etc" + source: String! + "component would be a service" + component: String! @search(by: [exact]) + "user who deployed the artifact" + deployedBy: String + "toolsUsed contains tools of different stages of source, build, artifact and deploy along with some different tools" + toolsUsed: ToolsUsed! + "deploymentRisk is the risk status of the deployment" + deploymentRisk: ApplicationDeploymentRisk @hasInverse(field: applicationDeployment) + "policyRunHistory is the policy execution history for this deployment" + policyRunHistory: [RunHistory!] @hasInverse(field: applicationDeployment) +} + +type ToolsUsed { + id: ID! + source: String + build: String + artifact: String + deploy: String + sbom: String + misc: [String!] +} + +""" +ApplicationDeploymentRisk tells us about the risk status and alerts for different stages for an application deployment. +""" +type ApplicationDeploymentRisk { + id: ID! + sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus + applicationDeployment: ApplicationDeployment! 
@hasInverse(field: deploymentRisk) +} + + +type Integrator + @auth( + query: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryIntegrator @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}"}, +{ rule: "query($groups: [String!]) { queryIntegrator @cascade { organization { teams { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}}"}, +{ rule: "query($groups: [String!]) { queryIntegrator @cascade { organization { teams { applications { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}}}"}, + ]}, + add: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryIntegrator @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, + ]}, + update: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryIntegrator @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, + ]}, + delete: + { rule: "{$type: {eq: \"internal-account/v1\"}}" } + ) +{ + id: String! @id + organization: Organization! + name: String! @search(by: [exact]) + type: String! @search(by: [exact]) + category: String! @search(by: [exact]) + credentials: Credentials! + createdAt: DateTime! + updatedAt: DateTime! +} + +type Credentials { + id: ID! + data: String! + integrator: Integrator! 
@hasInverse(field: credentials) +} + + +type FeatureMode + @auth( + query: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryFeatureMode @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}"}, +{ rule: "query($groups: [String!]) { queryFeatureMode @cascade { organization { teams { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}}"}, +{ rule: "query($groups: [String!]) { queryFeatureMode @cascade { organization { teams { applications { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}}}"}, + ]}, + add: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryFeatureMode @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, + ]}, + update: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryFeatureMode @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, + ]}, + delete: + { rule: "{$type: {eq: \"internal-account/v1\"}}" } + ) +{ + id: String! @id + organization: Organization! + scan: String! @search(by: [exact]) + type: String! @search(by: [exact]) + enabled: Boolean! + category: String! @search(by: [exact]) + createdAt: DateTime! + updatedAt: DateTime! +} + + + +""" +Tag tells us about the tags that are linked to policies and other components. +""" +type Tag { + id: String! @id @search(by:[exact]) + tagName: String! @search(by:[exact]) + tagValue: String! @search(by:[exact]) + tagDescription: String + createdBy: String @search(by:[exact]) + createdAt: DateTime! + updatedAt: DateTime! + policies: [PolicyEnforcement!] @hasInverse(field: tags) +} + +type PolicyDefinition { + id: String! @id + ownerOrg: Organization! 
+ ownerTeam: Team + ownerApplication: Application + createdAt: DateTime! + updatedAt: DateTime! + policyName: String! @search(by: [exact]) + category: String! @search(by: [exact]) + stage: String! @search(by: [exact]) + description: String! + scheduledPolicy: Boolean! + script: String! + variables: String + conditionName: String + suggestion: String +} + +type PolicyEnforcement { + id: ID! + policy: PolicyDefinition! + #It should be either of the three or else if node is shared it will make changes to 2 different destination, how to enforce that? + enforcedOrg: Organization @hasInverse(field: policyEnforcements) + enforcedTeam: Team @hasInverse(field: policyEnforcements) + enforcedApplication: Application @hasInverse(field: policyEnforcements) + status: Boolean! @search + forceApply: Boolean + severity: Severity! + datasourceTool: String! @search(by: [exact]) + action: String! @search(by: [exact]) + conditionValue: String + environments: [Environment!] + tags: [Tag!] @hasInverse(field: policies) + createdAt: DateTime! + updatedAt: DateTime! +} + +type RunHistory { + id: ID! + policyId: String! @search(by: [exact]) + applicationDeployment: ApplicationDeployment! @hasInverse(field: policyRunHistory) + PolicyName: String! @search(by: [exact]) + Severity: Severity! @search(by: [exact]) + Stage: String! @search(by: [exact]) + Artifact: String! @search(by: [exact]) + ArtifactTag: String! @search(by: [exact]) + ArtifactSha: String! @search(by: [exact]) + ArtifactNameTag: String! @search(by: [exact]) + Status: String! + DatasourceTool: String! + AlertTitle: String @search(by: [exact]) + AlertMessage: String @search(by: [exact]) + Suggestions: String @search(by: [exact]) + Reason: String + CreatedAt: DateTime! @search + UpdatedAt: DateTime! @search + DeployedAt: DateTime! @search + Action: String! @search(by: [exact]) + Hash: String + Error: String @search(by: [exact]) + Pass: Boolean! 
@search + MetaData: String + FileApi: String + JiraUrl: String + scheduledPolicy: Boolean! @search + policyEnforcements: PolicyEnforcement! +} + + +""" +BuildTool contains data from build tool events. +""" +type BuildTool { + "id is randomly assigned" + id: String! @id + "buildId is a unique job id, run id for a job/pipeline/action" + buildId: String! @search(by: [exact]) + "tool is jenkins etc" + tool: String! @search(by: [exact]) + "buildName is the name of the job/pipeline/action" + buildName: String! @search(by: [exact]) + buildUrl: String! @search(by: [exact]) + artifactType: String @search(by: [exact]) + "artifact would be something like nginx without the tag" + artifact: String! @search(by: [exact]) + "artifactTag would be the tag of the artifact" + artifactTag: String! @search(by: [exact]) + "digest is the sha of the artifact" + digest: String! @search(by: [exact]) + "buildDigest is the sha of the artifact as sent from the build tool" + buildDigest: String @search(by: [exact]) + "artifactNode links a BuildTool node to an artifact" + artifactNode: Artifact + "buildTime is the time at which the artifact was built" + buildTime: DateTime + "buildUser is the user that built the artifact" + buildUser: String + "sourceCodeTool links a BuildTool node to the source details" + sourceCodeTool: SourceCodeTool @hasInverse(field: buildTool) + "commitMetaData links a BuildTool node to the git commit based details" + commitMetaData: [CommitMetaData!] @hasInverse(field: buildTool) + createdAt: DateTime! +} + +""" +SourceCodeTool contains the source details about the artifact that was built. +""" +type SourceCodeTool { + "id is randomly assigned" + id: String! @id + createdAt: DateTime! + "scm is the scm tool github/gitlab etc" + scm: String! + "repository is the git remote repository" + repository: String! + "branch is the git branch on which the artifact was built" + branch: String! 
+ "headCommit is the checkout out head commit" + headCommit: String + "diffCommits is a comma separated string of the commits between the previous built artifact and the current" + diffCommits: String + licenseName: String + visibility: String + "parentRepo is populated in case the git repo is a fork" + parentRepo: String + buildTool: BuildTool! +} + +""" +CommitMetaData contains the git commit related details of the source repository . +""" +type CommitMetaData { + "id is randomly assigned" + id: ID! + "commit is a git commit that was used to build an artifact" + commit: String + repository: String + "commitSign tells us whether the commit is signed" + commitSign: Boolean + noOfReviewersConf: Int + reviewerList: [String!] + approverList: [String!] + buildTool: BuildTool! @hasInverse(field: commitMetaData) +} + +type Artifact { + id: String! @id + artifactType: String! @search(by: [exact]) + artifactName: String! @search(by: [exact]) + artifactTag: String! @search(by: [exact]) + artifactSha: String! @search(by: [exact]) + scanData: [ArtifactScanData!] +} + +type ArtifactScanData { + id: String! @id + artifactSha: String! @search(by: [exact]) + tool: String! @search(by: [exact]) + artifactDetails: Artifact @hasInverse(field: scanData) + lastScannedAt: DateTime + components: [Component!] + vulnCriticalCount: Int @search + vulnHighCount: Int @search + vulnMediumCount: Int @search + vulnLowCount: Int @search + vulnInfoCount: Int @search + vulnUnknownCount: Int @search + vulnNoneCount: Int @search + vulnTotalCount: Int @search +} + +type Component { + id: String! @id + type: String! + name: String! @search(by: [exact]) + version: String! @search(by: [exact]) + licenses: [String!] + purl: String @search(by: [exact]) + cpe: String @search(by: [exact]) + scannedAt: DateTime + vulnerabilities: [Vulnerability!] @hasInverse(field: affects) + artifacts: [ArtifactScanData!] 
@hasInverse(field: components) +} + +enum Severity { + critical + high + medium + low + info + none + unknown +} + +type Vulnerability { + id: String! @id + parent: String! @search(by: [exact]) + ratings: Severity @search(by: [exact]) + cwes: [CWE!] + summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime @search + cvss: [CVSS!] + affects: [Component!] @hasInverse(field: vulnerabilities) +} + +type CWE { + id: String! @id + name: String! + description: String +} + +type CVSS { + type: String + score: String +} + +# Dgraph.Allow-Origin "http://localhost:4200" +# Dgraph.Authorization {"VerificationKey":"","Header":"X-OpsMx-Auth","jwkurl":"http://token-machine:8050/jwk","Namespace":"ssd.opsmx.io","Algo":"","Audience":["ssd.opsmx.io"],"ClosedByDefault":false}` diff --git a/schemas/june2024.go b/schemas/june2024.go new file mode 100644 index 0000000..95f33ee --- /dev/null +++ b/schemas/june2024.go @@ -0,0 +1,646 @@ +package schemas + +const June2024Schema = `type SchemaVersion { + version: String! +} + +interface RBAC { + roles: [Role!] +} + +enum RolePermission { + admin + write + read +} + +type Role { + "id is randomly assigned" + id: String! @id + "group should be a URI format that includes a scope or realm" + group: String! @search(by: [hash]) + permission: RolePermission! @search(by: [hash]) +} + +""" +KeyValue is a generic key/value pair, used as an attribute list or similar. +""" +type KeyValue { + id: String! @id + name: String! + value: String! 
+} + + +type Organization implements RBAC + @withSubscription + @auth( + query: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryOrganization @cascade { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}"}, +{ rule: "query($groups: [String!]) { queryOrganization @cascade { teams { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}"}, +{ rule: "query($groups: [String!]) { queryOrganization @cascade { teams { applications { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}}"}, + ]}, + add: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryOrganization @cascade { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}"}, + ]}, + update: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryOrganization @cascade { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}"}, + ]}, + delete: + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, + ) +{ + "id is randomly assigned" + id: String! @id + name: String! @search(by: [exact]) + roles: [Role!] + teams: [Team!] @hasInverse(field: organization) + environments: [DeploymentTarget!] @hasInverse(field: organization) + policies: [PolicyDefinition!] @hasInverse(field: ownerOrg) + policyEnforcements: [PolicyEnforcement!] + integrators: [Integrator!] @hasInverse(field: organization) + featureModes: [FeatureMode!] @hasInverse(field: organization) +} + +""" +Environment can be things like dev, prod, staging etc. +""" +type Environment { + id: String! @id + organization: Organization! + purpose: String! @search(by: [exact]) +} + +""" +DeploymentTarget describes a single place that things can be deployed into, +such as an AWS account or a Kubernetes cluster. 
+""" +type DeploymentTarget + @auth( + query: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryDeploymentTarget @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}"}, +{ rule: "query($groups: [String!]) { queryDeploymentTarget @cascade { organization { teams { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}}"}, + ]}, + add: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryDeploymentTarget @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, + ]}, + update: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryDeploymentTarget @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, + ]}, + delete: + { rule: "{$type: {eq: \"internal-account/v1\"}}" } + ) +{ + "id is randomly assigned" + id: String! @id + name: String! @search(by: [exact, regexp]) + "this would be the ip/server address of the target environment" + ip: String! @search(by: [exact]) + account: String + "this would be something like aws, gcp etc" + targetType: String + "this would be something like us-east-1 etc" + region: String + kubescapeServiceConnected: String + isFirewall: Boolean + organization: Organization! @hasInverse(field: environments) + defaultEnvironment: Environment! 
+} + + +type Team implements RBAC + @withSubscription + @auth( + query: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryTeam @cascade { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}"}, +{ rule: "query($groups: [String!]) { queryTeam @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}"}, +{ rule: "query($groups: [String!]) { queryTeam @cascade { applications { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}"}, + ]}, + add: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryTeam @cascade { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}"}, +{ rule: "query($groups: [String!]) { queryTeam @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, + ]}, + update: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryTeam @cascade { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}"}, +{ rule: "query($groups: [String!]) { queryTeam @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, + ]}, + delete: + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, + ) +{ + "id is randomly assigned" + id: String! @id + name: String! @search(by: [exact]) + roles: [Role!] + organization: Organization! @hasInverse(field: teams) + applications: [Application!] + labels: [KeyValue!] + policies: [PolicyDefinition!] @hasInverse(field: ownerTeam) + policyEnforcements: [PolicyEnforcement!] 
+} + +type Application implements RBAC + @withSubscription + @auth( + query: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryApplication @cascade { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}"}, +{ rule: "query($groups: [String!]) { queryApplication @cascade { team { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}"}, +{ rule: "query($groups: [String!]) { queryApplication @cascade { team { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}}"}, + ]}, + add: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryApplication @cascade { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}"}, +{ rule: "query($groups: [String!]) { queryApplication @cascade { team { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, +{ rule: "query($groups: [String!]) { queryApplication @cascade { team { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}}"}, + ]}, + update: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryApplication @cascade { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}"}, +{ rule: "query($groups: [String!]) { queryApplication @cascade { team { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, +{ rule: "query($groups: [String!]) { queryApplication @cascade { team { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}}"}, + ]}, + delete: + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, + ) +{ + "id is randomly assigned" + id: String! @id + name: String! @search(by: [exact, regexp]) + roles: [Role!] + environments: [ApplicationEnvironment!] 
@hasInverse(field: application)
+ team: Team! @hasInverse(field: applications)
+ policies: [PolicyDefinition!] @hasInverse(field: ownerApplication)
+ policyEnforcements: [PolicyEnforcement!] @hasInverse(field: enforcedApplication)
+ metadata: [KeyValue!]
+}
+
+
+"""
+ApplicationEnvironment is a running instance of an application down to the level of a namespace or its non k8s equivalent.
+"""
+type ApplicationEnvironment @withSubscription {
+ "id is randomly assigned"
+ id: String! @id
+ "environment denotes whether it is dev, prod, staging, non-prod etc"
+ environment: Environment
+ application: Application!
+ deploymentTarget: DeploymentTarget!
+ namespace: String! @search(by:[exact, regexp])
+ "toolsUsed is a comma-separated string that contains all the tools(source, build, artifact, deploy etc) for an app env"
+ toolsUsed: [String!]
+ deployments: [ApplicationDeployment!] @hasInverse(field: applicationEnvironment)
+ riskStatus: ApplicationRiskStatus @hasInverse(field: applicationEnvironment)
+ metadata: [KeyValue!]
+}
+
+"""
+RiskStatus tells us what risk a current application instance or a deployment is at.
+"""
+enum RiskStatus {
+ lowrisk
+ mediumrisk
+ highrisk
+ apocalypserisk
+ scanning
+}
+
+"""
+ApplicationRiskStatus tells us about the risk status and alerts for different stages for an application environment.
+"""
+type ApplicationRiskStatus {
+ id: ID!
+ riskStatus: RiskStatus @search(by: [exact,regexp])
+ sourceCodeAlerts: Int
+ buildAlerts: Int
+ artifactAlerts: Int
+ deploymentAlerts: Int
+ createdAt: DateTime!
+ updatedAt: DateTime!
+ applicationEnvironment: ApplicationEnvironment!
+}
+
+
+"""
+DeploymentStage is an enum denoting the stage of the deployment.
+"""
+enum DeploymentStage {
+ "deployment is discovered from the events"
+ discovered
+ "scanning is under process"
+ scanning
+ "deployment is known to have passed the deployment firewall and the deployment(ie the artifact) is live"
+ current
+ "deployment becomes a past deployment because another fresh deployment has happened"
+ previous
+ "deployment is blocked by the firewall"
+ blocked
+}
+
+"""
+ApplicationDeployment tells us about the artifact deployed along with its associated details.
+"""
+type ApplicationDeployment {
+ "id is randomly assigned"
+ id: String! @id
+ "artifact that is deployed"
+ artifact: [Artifact!] @hasInverse(field: artifactDeployment)
+ applicationEnvironment: ApplicationEnvironment!
+ deployedAt: DateTime @search
+ "deploymentStage is an enum and can be discovered, current, previous or blocked"
+ deploymentStage: DeploymentStage! @search(by: [exact])
+ "source is argo, spinnaker etc"
+ source: String!
+ "component would be a service"
+ component: String! @search(by: [exact, regexp])
+ "user who deployed the artifact"
+ deployedBy: String
+ "toolsUsed contains tools of different stages of source, build, artifact and deploy along with some different tools"
+ toolsUsed: ToolsUsed!
+ "deploymentRisk is the risk status of the deployment"
+ deploymentRisk: ApplicationDeploymentRisk @hasInverse(field: applicationDeployment)
+ "policyRunHistory is the policy execution history for this deployment"
+ policyRunHistory: [RunHistory!] @hasInverse(field: applicationDeployment)
+}
+
+type ToolsUsed {
+ id: ID!
+ source: String
+ build: String
+ artifact: String
+ deploy: String
+ sbom: String
+ misc: [String!]
+}
+
+"""
+ApplicationDeploymentRisk tells us about the risk status and alerts for different stages for an application deployment.
+"""
+type ApplicationDeploymentRisk {
+ id: ID!
+ sourceCodeAlertsScore: Int + buildAlertsScore: Int + artifactAlertsScore: Int + deploymentAlertsScore: Int + deploymentRiskStatus: RiskStatus @search(by: [exact,regexp]) + applicationDeployment: ApplicationDeployment! @hasInverse(field: deploymentRisk) +} + + +type Integrator + @auth( + query: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryIntegrator @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}"}, +{ rule: "query($groups: [String!]) { queryIntegrator @cascade { organization { teams { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}}"}, +{ rule: "query($groups: [String!]) { queryIntegrator @cascade { organization { teams { applications { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}}}"}, + ]}, + add: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryIntegrator @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, + ]}, + update: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryIntegrator @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, + ]}, + delete: + { rule: "{$type: {eq: \"internal-account/v1\"}}" } + ) +{ + id: String! @id + organization: Organization! + name: String! @search(by: [exact]) + type: String! @search(by: [exact]) + category: String! @search(by: [exact]) + credentials: Credentials! + createdAt: DateTime! + updatedAt: DateTime! +} + +type Credentials { + id: ID! + data: String! + integrator: Integrator! 
@hasInverse(field: credentials) +} + + +type FeatureMode + @auth( + query: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryFeatureMode @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}"}, +{ rule: "query($groups: [String!]) { queryFeatureMode @cascade { organization { teams { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}}"}, +{ rule: "query($groups: [String!]) { queryFeatureMode @cascade { organization { teams { applications { roles(filter: {group: {in: $groups}, permission: {in: [admin,read]}}) { __typename }}}}}}"}, + ]}, + add: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryFeatureMode @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, + ]}, + update: { + or: [ + { rule: "{$type: {eq: \"internal-account/v1\"}}" }, +{ rule: "query($groups: [String!]) { queryFeatureMode @cascade { organization { roles(filter: {group: {in: $groups}, permission: {in: [admin]}}) { __typename }}}}"}, + ]}, + delete: + { rule: "{$type: {eq: \"internal-account/v1\"}}" } + ) +{ + id: String! @id + organization: Organization! + scan: String! @search(by: [exact]) + type: String! @search(by: [exact]) + enabled: Boolean! + category: String! @search(by: [exact]) + createdAt: DateTime! + updatedAt: DateTime! +} + + + +""" +Tag tells us about the tags that are linked to policies and other components. +""" +type Tag { + id: String! @id @search(by:[exact]) + tagName: String! @search(by:[exact]) + tagValue: String! @search(by:[exact]) + tagDescription: String + createdBy: String @search(by:[exact]) + createdAt: DateTime! + updatedAt: DateTime! + policies: [PolicyEnforcement!] @hasInverse(field: tags) +} + +type PolicyDefinition { + id: String! @id + ownerOrg: Organization! 
+ ownerTeam: Team + ownerApplication: Application + createdAt: DateTime! + updatedAt: DateTime! + policyName: String! @search(by: [exact]) + category: String! @search(by: [exact]) + stage: String! @search(by: [exact]) + description: String! @search(by: [exact]) + scheduledPolicy: Boolean! @search + script: String! @search(by: [exact]) + variables: String @search(by: [exact]) + conditionName: String @search(by: [exact]) + suggestion: String @search(by: [exact]) +} + +type PolicyEnforcement { + id: ID! + policy: PolicyDefinition! + #It should be either of the three or else if node is shared it will make changes to 2 different destination, how to enforce that? + enforcedOrg: Organization @hasInverse(field: policyEnforcements) + enforcedTeam: Team @hasInverse(field: policyEnforcements) + enforcedApplication: Application @hasInverse(field: policyEnforcements) + status: Boolean! @search + forceApply: Boolean @search + severity: Severity! + datasourceTool: String! @search(by: [exact]) + action: String! @search(by: [exact]) + conditionValue: String @search(by: [exact]) + environments: [Environment!] + tags: [Tag!] @hasInverse(field: policies) + createdAt: DateTime! + updatedAt: DateTime! +} + +type RunHistory { + id: ID! + policyId: String! @search(by: [exact]) + applicationDeployment: ApplicationDeployment @hasInverse(field: policyRunHistory) + artifactScan: ArtifactScanData @hasInverse(field: artifactRunHistory) + PolicyName: String! @search(by: [exact]) + Stage: String! @search(by: [exact]) + Artifact: String! @search(by: [exact]) + ArtifactTag: String! @search(by: [exact]) + ArtifactSha: String! @search(by: [exact]) + ArtifactNameTag: String! @search(by: [exact,regexp]) + DatasourceTool: String! + CreatedAt: DateTime! @search + UpdatedAt: DateTime! @search + DeployedAt: DateTime! @search + Hash: String + Pass: Boolean! @search + MetaData: String + FileApi: String + scheduledPolicy: Boolean! @search + policyEnforcements: PolicyEnforcement! 
+ securityIssue: SecurityIssue @hasInverse(field: Affects) +} + +type SecurityIssue { + id: ID! + AlertTitle: String @search(by: [exact,regexp]) + AlertMessage: String @search(by: [exact]) + Suggestions: String @search(by: [exact]) + Severity: Severity! @search(by: [exact]) + CreatedAt: DateTime! @search + UpdatedAt: DateTime! @search + Action: String! @search(by: [exact]) + JiraUrl: String + Status: String! @search(by: [exact]) + Reason: String @search(by: [exact]) + Error: String @search(by: [exact]) + Affects: [RunHistory!] @hasInverse(field: securityIssue) +} + + +""" +BuildTool contains data from build tool events. +""" +type BuildTool { + "id is randomly assigned" + id: String! @id + "buildId is a unique job id, run id for a job/pipeline/action" + buildId: String! @search(by: [exact,regexp]) + "tool is jenkins etc" + tool: String! @search(by: [exact]) + "buildName is the name of the job/pipeline/action" + buildName: String! @search(by: [exact, regexp]) + buildUrl: String! @search(by: [exact]) + artifactType: String @search(by: [exact]) + "artifact would be something like nginx without the tag" + artifact: String! @search(by: [exact]) + "artifactTag would be the tag of the artifact" + artifactTag: String! @search(by: [exact]) + "digest is the sha of the artifact" + digest: String! @search(by: [exact]) + "buildDigest is the sha of the artifact as sent from the build tool" + buildDigest: String @search(by: [exact]) + "artifactNode links a BuildTool node to an artifact" + artifactNode: Artifact @hasInverse(field: buildDetails) + "buildTime is the time at which the artifact was built" + buildTime: DateTime + "buildUser is the user that built the artifact" + buildUser: String + "sourceCodeTool links a BuildTool node to the source details" + sourceCodeTool: SourceCodeTool @hasInverse(field: buildTool) + "commitMetaData links a BuildTool node to the git commit based details" + commitMetaData: [CommitMetaData!] @hasInverse(field: buildTool) + createdAt: DateTime! 
+}
+
+"""
+SourceCodeTool contains the source details about the artifact that was built.
+"""
+type SourceCodeTool {
+ "id is randomly assigned"
+ id: String! @id
+ createdAt: DateTime!
+ "scm is the scm tool github/gitlab etc"
+ scm: String!
+ "repository is the git remote repository"
+ repository: String! @search(by: [exact,regexp])
+ "branch is the git branch on which the artifact was built"
+ branch: String!
+ "headCommit is the checked-out head commit"
+ headCommit: String
+ "diffCommits is a comma separated string of the commits between the previous built artifact and the current"
+ diffCommits: String
+ licenseName: String
+ visibility: String
+ workflowName: String
+ "parentRepo is populated in case the git repo is a fork"
+ parentRepo: String
+ buildTool: BuildTool!
+}
+
+"""
+CommitMetaData contains the git commit related details of the source repository.
+"""
+type CommitMetaData {
+ "id is randomly assigned"
+ id: ID!
+ "commit is a git commit that was used to build an artifact"
+ commit: String
+ repository: String
+ "commitSign tells us whether the commit is signed"
+ commitSign: Boolean
+ noOfReviewersConf: Int
+ reviewerList: [String!]
+ approverList: [String!]
+ buildTool: BuildTool! @hasInverse(field: commitMetaData)
+}
+
+type Artifact {
+ id: String! @id
+ artifactType: String! @search(by: [exact])
+ artifactName: String! @search(by: [exact, regexp])
+ artifactTag: String! @search(by: [exact, regexp])
+ artifactSha: String! @search(by: [exact])
+ scanData: [ArtifactScanData!]
+ artifactDeployment: [ApplicationDeployment!] @hasInverse(field: artifact)
+ buildDetails: BuildTool @hasInverse(field: artifactNode)
+}
+
+type ArtifactScanData {
+ id: String! @id
+ artifactSha: String! @search(by: [exact])
+ tool: String! @search(by: [exact])
+ artifactDetails: Artifact @hasInverse(field: scanData)
+ lastScannedAt: DateTime
+ createdAt: DateTime
+ vulnTrackingId: String
+ components: [Component!]
+ vulnCriticalCount: Int @search + vulnHighCount: Int @search + vulnMediumCount: Int @search + vulnLowCount: Int @search + vulnInfoCount: Int @search + vulnUnknownCount: Int @search + vulnNoneCount: Int @search + vulnTotalCount: Int @search + sbomUrl: String + artifactLicenseScanUrl: String + artifactSecretScanUrl: String + sourceLicenseScanUrl: String + sourceSecretScanUrl: String + sourceScorecardScanUrl: String + sourceSemgrepHighSeverityScanUrl: String + sourceSemgrepMediumSeverityScanUrl: String + sourceSemgrepLowSeverityScanUrl: String + sourceSnykScanUrl: String + virusTotalUrlScan: String + riskStatus: RiskStatus @search(by: [exact, regexp]) + artifactRunHistory: [RunHistory!] @hasInverse(field: artifactScan) +} + +type Component { + id: String! @id + type: String! + name: String! @search(by: [exact, regexp]) + version: String! @search(by: [exact, regexp]) + licenses: [String!] + purl: String @search(by: [exact]) + cpe: String @search(by: [exact]) + scannedAt: DateTime + vulnerabilities: [Vulnerability!] @hasInverse(field: affects) + artifacts: [ArtifactScanData!] @hasInverse(field: components) +} + +enum Severity { + critical + high + medium + low + info + none + unknown +} + +type Vulnerability { + id: String! @id + parent: String! @search(by: [exact, regexp]) + ratings: Severity @search(by: [exact]) + cwes: [CWE!] + summary: String + detail: String + recommendation: String + published: DateTime + modified: DateTime + createdAt: DateTime @search + cvss: Float @search + priority: String @search(by: [exact, regexp]) + epss: Float @search + cisa_kev: String @search(by: [exact, regexp]) + affects: [Component!] @hasInverse(field: vulnerabilities) +} + +type CWE { + id: String! @id + name: String! 
+ description: String +} + +# Dgraph.Allow-Origin "http://localhost:4200" +# Dgraph.Authorization {"VerificationKey":"","Header":"X-OpsMx-Auth","jwkurl":"http://token-machine:8050/jwk","Namespace":"ssd.opsmx.io","Algo":"","Audience":["ssd.opsmx.io"],"ClosedByDefault":false}`