Add src
alisakotliarova authored Sep 15, 2021
1 parent 2dcd041 commit 8fc04bd
Showing 38 changed files with 5,184 additions and 0 deletions.
299 changes: 299 additions & 0 deletions src/main/resources/application.conf
@@ -0,0 +1,299 @@
########################################
# akka-http-core Reference Config File #
########################################

# This is the reference config file that contains all the default settings.
# Make your edits/overrides in your application.conf.

# Akka HTTP version, checked against the runtime version of Akka HTTP.
# Loaded from generated conf file.
include "akka-http-version"

akka.http {

client {
# The default value of the `User-Agent` header to produce if no
# explicit `User-Agent` header was included in a request.
# If this value is the empty string and no header was included in
# the request, no `User-Agent` header will be rendered at all.
user-agent-header = akka-http/${akka.http.version}

# The time period within which the TCP connecting process must be completed.
connecting-timeout = 60s

# The time after which an idle connection will be automatically closed.
# Set to `infinite` to completely disable idle timeouts.
idle-timeout = 60 s

# The initial size of the buffer to render the request headers in.
# Can be used for fine-tuning request rendering performance but probably
# doesn't have to be fiddled with in most applications.
request-header-size-hint = 512

# Socket options to set for the outgoing connection's socket. If a setting is left
# undefined, it will use whatever the default on the system is.
socket-options {
so-receive-buffer-size = undefined
so-send-buffer-size = undefined
so-reuse-address = undefined
so-traffic-class = undefined
tcp-keep-alive = undefined
tcp-oob-inline = undefined
tcp-no-delay = undefined
}

# Modify to tweak parsing settings on the client-side only.
parsing {
# no overrides by default, see `akka.http.parsing` for default values
}

# Enables/disables the logging of unencrypted HTTP traffic to and from the HTTP
# client for debugging reasons.
#
# Note: Use with care. Logging of unencrypted data traffic may expose secret data.
#
# Incoming and outgoing traffic will be logged in hexdump format. To enable logging,
# specify the number of bytes to log per chunk of data (the actual chunking depends
# on implementation details and networking conditions and should be treated as
# arbitrary).
#
# For logging on the server side, see akka.http.server.log-unencrypted-network-bytes.
#
# `off` : no log messages are produced
# Int : determines how many bytes should be logged per data chunk
log-unencrypted-network-bytes = off

websocket {
# Periodic keep-alive may be implemented by sending Ping frames,
# upon which the other side is expected to reply with a Pong frame,
# or by sending a Pong frame, which serves as a unidirectional heartbeat.
# Valid values:
# ping - default, for bi-directional ping/pong keep-alive heartbeating
# pong - for uni-directional pong keep-alive heartbeating
#
# See https://tools.ietf.org/html/rfc6455#section-5.5.2
# and https://tools.ietf.org/html/rfc6455#section-5.5.3 for more information
periodic-keep-alive-mode = ping

# Interval for sending periodic keep-alives
# The frame sent will be the one configured in akka.http.client.websocket.periodic-keep-alive-mode.
# `infinite` by default, or a duration that is the max idle interval after which a keep-alive frame should be sent.
periodic-keep-alive-max-idle = infinite
}

# Cancellation in the HTTP streams is delayed by this duration to prevent race conditions between cancellation
# and stream completion / failure. In most cases, the value chosen here should make no difference because
# HTTP streams are loops where completion and failures should propagate immediately and make the handling of
# cancellations redundant.
#
# In most cases, there should be no reason to change this setting.
#
# Set to 0 to disable the delay.
stream-cancellation-delay = 100 millis
}

host-connection-pool {
# The maximum number of parallel connections that a connection pool to a
# single host endpoint is allowed to establish. Must be greater than zero.
max-connections = 4

# The minimum number of parallel connections that a pool should keep alive ("hot").
# If the number of connections falls below the given threshold, new ones are spawned.
# You can use this setting to build a hot pool of "always on" connections.
# Default is 0, meaning there might be no active connection at a given moment.
# Keep in mind that `min-connections` must be less than or equal to `max-connections`.
min-connections = 0

# The maximum number of times failed requests are attempted again
# (if the request can be safely retried) before giving up and returning an error.
# Set to zero to completely disable request retries.
max-retries = 5

# The maximum number of open requests accepted into the pool across all
# materializations of any of its client flows.
# Protects against (accidentally) overloading a single pool with too many client flow materializations.
# Note that with N concurrent materializations the max number of open requests in the pool
# will never exceed N * max-connections * pipelining-limit.
# Must be a power of 2 and > 0!
max-open-requests = 32

# The maximum duration for a connection to be kept alive.
# This amount gets modified by a 10 percent fuzziness to avoid simultaneous reconnections.
# defaults to 'infinite'
# Note that this is only implemented in the new host connection pool
max-connection-lifetime = infinite

# Client-side pipelining is not currently supported. See https://github.com/akka/akka-http/issues/32
pipelining-limit = 1

# The minimum duration to backoff new connection attempts after the previous connection attempt failed.
#
# The pool uses an exponential randomized backoff scheme. After the first failure, the next attempt will only be
# tried after a random duration between the base connection backoff and twice the base connection backoff. If that
# attempt fails as well, the next attempt will be delayed by twice that amount. The total delay is capped using the
# `max-connection-backoff` setting.
#
# The backoff applies to the complete pool, i.e. after one failed connection attempt, further connection attempts
# to that host will back off for all connections of the pool. After the service has recovered, connections will come out
# of backoff one by one due to the random extra backoff time. This is to avoid overloading recently recovered
# services with new connections ("thundering herd").
#
# Example: base-connection-backoff = 100ms, max-connection-backoff = 10 seconds
# - After 1st failure, backoff somewhere between 100ms and 200ms
# - After 2nd, between 200ms and 400ms
# - After 3rd, between 400ms and 800ms
# - After 4th, between 800ms and 1600ms
# - After 5th, between 1600ms and 3200ms
# - After 6th, between 3200ms and 6400ms
# - After 7th, between 5000ms and 10 seconds (max capped by max-connection-backoff, min by half of that)
# - After 8th, etc., stays between 5000ms and 10 seconds
#
# This setting only applies to the new pool implementation and is ignored for the legacy one.
base-connection-backoff = 100ms

# Maximum backoff duration between failed connection attempts. For more information see the above comment for the
# `base-connection-backoff` setting.
#
# This setting only applies to the new pool implementation and is ignored for the legacy one.
max-connection-backoff = 2 min

# The time after which an idle connection pool (without pending requests)
# will automatically terminate itself. Set to `infinite` to completely disable idle timeouts.
idle-timeout = 30 s

# The pool implementation to use. Currently supported are:
# - legacy: the original 10.0.x pool implementation
# - new: the pool implementation that became the default in 10.1.x and will receive fixes and new features
pool-implementation = new

# The "new" pool implementation will fail a connection early and clear the slot if a response entity was not
# subscribed within the given time period after the response was dispatched. In busy systems the timeout might be
# too tight if a response is not picked up quickly enough after it was dispatched by the pool.
response-entity-subscription-timeout = 1.second

# Modify this section to tweak client settings only for host connection pools APIs like `Http().superPool` or
# `Http().singleRequest`.
client = {
# no overrides by default, see `akka.http.client` for default values
}
}

# Modify to tweak default parsing settings.
#
# IMPORTANT:
# Please note that this section's settings can be overridden by the corresponding settings in:
# `akka.http.server.parsing`, `akka.http.client.parsing` or `akka.http.host-connection-pool.client.parsing`.
parsing {
# The limits for the various parts of the HTTP message parser.
max-uri-length = 2k
max-method-length = 16
max-response-reason-length = 64
max-header-name-length = 64
max-header-value-length = 8k
max-header-count = 64
max-chunk-ext-length = 256
max-chunk-size = 1m

# Default maximum content length which should not be exceeded by incoming request entities.
# Can be changed at runtime (to a higher or lower value) via the `HttpEntity::withSizeLimit` method.
# Note that it is not necessarily a problem to set this to a high value as all stream operations
# are always properly backpressured.
# Nevertheless you might want to apply some limit in order to prevent a single client from consuming
# an excessive amount of server resources.
#
# Set to `infinite` to completely disable entity length checks. (Even then you can still apply one
# programmatically via `withSizeLimit`.)
max-content-length = 8m

# The maximum number of bytes to allow when reading the entire entity into memory with `toStrict`
# (which is used by the `toStrictEntity` and `extractStrictEntity` directives)
max-to-strict-bytes = 8m

# Sets the strictness mode for parsing request target URIs.
# The following values are defined:
#
# `strict`: RFC3986-compliant URIs are required,
# a 400 response is triggered on violations
#
# `relaxed`: all visible 7-Bit ASCII chars are allowed
#
uri-parsing-mode = strict

# Sets the parsing mode for parsing cookies.
# The following values are defined:
#
# `rfc6265`: Only RFC6265-compliant cookies are parsed. Surrounding double-quotes are accepted and
# automatically removed. Non-compliant cookies are silently discarded.
# `raw`: Raw parsing allows any non-control character except ';' to appear in a cookie value. No further
# post-processing is applied, so the resulting value string may contain any number of whitespace, unicode,
# double-quote, or '=' characters at any position.
# The rules for parsing the cookie name are the same ones from RFC 6265.
#
cookie-parsing-mode = rfc6265

# Enables/disables the logging of warning messages in case an incoming
# message (request or response) contains an HTTP header which cannot be
# parsed into its high-level model class due to incompatible syntax.
# Note that, independently of this setting, akka-http will accept messages
# with such headers as long as the message as a whole would still be legal
# under the HTTP specification even without this header.
# If a header cannot be parsed into a high-level model instance it will be
# provided as a `RawHeader`.
# If logging is enabled it is performed with the configured
# `error-logging-verbosity`.
illegal-header-warnings = on

# Sets the list of headers for which illegal values will *not* cause warning logs to be emitted.
#
# Adding a header name to this setting list disables the logging of warning messages in case an incoming message
# contains an HTTP header which cannot be parsed into its high-level model class due to incompatible syntax.
ignore-illegal-header-for = []

# Parse headers into typed model classes in the Akka HTTP core layer.
#
# If set to `off`, only essential headers will be parsed into their model classes. All other ones will be provided
# as instances of `RawHeader`. Currently, `Connection`, `Host`, and `Expect` headers will still be provided in their
# typed model. The full list of headers still provided as modeled instances can be found in the source code of
# `akka.http.impl.engine.parsing.HttpHeaderParser.alwaysParsedHeaders`. Note that (regardless of this setting)
# some headers like `Content-Type` are treated specially and will never be provided in the list of headers.
modeled-header-parsing = on

# Configures the verbosity with which message (request or response) parsing
# errors are written to the application log.
#
# Supported settings:
# `off` : no log messages are produced
# `simple`: a condensed single-line message is logged
# `full` : the full error details (potentially spanning several lines) are logged
error-logging-verbosity = full

# Configures the processing mode when encountering illegal characters in
# a response header value.
#
# Supported modes:
# `error` : default mode, throw a ParsingException and terminate the processing
# `warn` : ignore the illegal characters in the response header value and log a warning message
# `ignore` : just ignore the illegal characters in the response header value
illegal-response-header-value-processing-mode = error

# limits for the number of different values per header type that the
# header cache will hold
header-cache {
default = 12
Content-MD5 = 0
Date = 0
If-Match = 0
If-Modified-Since = 0
If-None-Match = 0
If-Range = 0
If-Unmodified-Since = 0
User-Agent = 32
}

# Enables/disables inclusion of a Tls-Session-Info header in parsed
# messages over TLS transports (i.e., HttpRequest on the server side and
# HttpResponse on the client side).
tls-session-info-header = off
}
}
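
The file above restates the akka-http-core defaults; as its own header notes, an application normally overrides only the handful of settings it cares about, either in application.conf or programmatically before the actor system starts. Below is a minimal sketch of a programmatic override using standard Typesafe Config calls; the object name, system name, and the chosen values (120 s idle timeout, 16 pool connections) are illustrative and not part of this commit.

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

object ConfigOverrideExample extends App {
  // Override a few client/pool settings; everything not listed here
  // falls back to the defaults documented in the file above.
  val overrides = ConfigFactory.parseString(
    """
    akka.http.client.idle-timeout = 120 s
    akka.http.host-connection-pool.max-connections = 16
    """)

  val config = overrides.withFallback(ConfigFactory.load())
  val system = ActorSystem("example-system", config)
}

ConfigFactory.parseString, withFallback, and ConfigFactory.load are plain Typesafe Config APIs; the override paths match the akka.http.client and akka.http.host-connection-pool sections shown above.
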
13 changes: 13 additions & 0 deletions src/main/scala/org/alvearie/hri/api/BatchLookup.scala
@@ -0,0 +1,13 @@
/**
* (C) Copyright IBM Corp. 2021
*
* SPDX-License-Identifier: Apache-2.0
*/

package org.alvearie.hri.api

import scala.util.Try

trait BatchLookup {
def getBatchId(tenantId: String, batchId: String): Try[BatchNotification]
}
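
BatchLookup is a single-method trait: anything that can resolve a (tenantId, batchId) pair into a BatchNotification wrapped in a Try satisfies it. The following is a hedged usage sketch, assuming some implementation is available at the call site; the object, the describeBatch helper, and its messages are illustrative and not part of this commit (BatchNotification itself is defined elsewhere in the changeset).

import scala.util.{Failure, Success}
import org.alvearie.hri.api.BatchLookup

object BatchLookupUsage {
  // Resolve a batch and turn the Try result into a human-readable summary.
  def describeBatch(client: BatchLookup, tenantId: String, batchId: String): String =
    client.getBatchId(tenantId, batchId) match {
      case Success(notification) =>
        s"Found batch $batchId for tenant $tenantId: $notification"
      case Failure(err) =>
        s"Lookup failed for batch $batchId: ${err.getMessage}"
    }
}
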